/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
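
/* Example usage (a sketch, assuming an iproute2 build with VRF support;
 * the device and table names below are illustrative):
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 */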
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info __rcu	*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}
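
/* Usage note: the per-cpu counters folded above are what tools such as
 * "ip -s link show dev <vrf>" display for the VRF device (one consumer
 * given for illustration).
 */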
#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	ret = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
			    struct net_device *vrf_dev)
{
	struct rtable *rt;
	int err = 1;

	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
	if (IS_ERR(rt))
		goto out;

	/* TO-DO: what about broadcast ? */
	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto out;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	err = 0;
out:
	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	}

	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
static void vrf_rt6_release(struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);

	rcu_assign_pointer(vrf->rt6, NULL);

	if (rt6)
		dst_release(&rt6->dst);
}
static int vrf_rt6_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	rt6 = ip6_dst_alloc(net, dev,
			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output = vrf_output6;
	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static void vrf_rt6_release(struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);

	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static void vrf_rtable_release(struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);

	rcu_assign_pointer(vrf->rth, NULL);

	if (rth)
		dst_release(&rth->dst);
}
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	/* make sure the FIB table the default dst points to exists */
	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output = vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	/* refuse to enslave another VRF or an already-enslaved device */
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
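
/* Enslaving is driven from userspace through the standard master/nomaster
 * rtnetlink operations, e.g. (illustrative device names):
 *
 *   ip link set dev eth1 master vrf-blue      # reaches vrf_add_slave()
 *   ip link set dev eth1 nomaster             # reaches vrf_del_slave()
 */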
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(vrf);
	vrf_rt6_release(vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	return 0;

out_rth:
	vrf_rtable_release(vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}
static struct rtable *vrf_get_rtable(const struct net_device *dev,
				     const struct flowi4 *fl4)
{
	struct rtable *rth = NULL;

	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rcu_read_lock();

		rth = rcu_dereference(vrf->rth);
		if (likely(rth))
			dst_hold(&rth->dst);

		rcu_read_unlock();
	}

	return rth;
}
/* called under rcu_read_lock */
static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
	struct fib_result res = { .tclassid = 0 };
	struct net *net = dev_net(dev);
	u32 orig_tos = fl4->flowi4_tos;
	u8 flags = fl4->flowi4_flags;
	u8 scope = fl4->flowi4_scope;
	u8 tos = RT_FL_TOS(fl4);
	int rc;

	if (unlikely(!fl4->daddr))
		return 0;

	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	/* make sure oif is set to VRF device for lookup */
	fl4->flowi4_oif = dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rc = fib_lookup(net, fl4, &res, 0);
	if (!rc) {
		if (res.type == RTN_LOCAL)
			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
		else
			fib_select_path(net, &res, fl4, -1);
	}

	/* restore the caller's flow parameters */
	fl4->flowi4_flags = flags;
	fl4->flowi4_tos = orig_tos;
	fl4->flowi4_scope = scope;

	return rc;
}
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	/* if packet is NDISC keep the ingress interface */
	if (!ipv6_ndisc_frame(skb)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

	return skb;
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
					 const struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;

	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt;

		rcu_read_lock();

		rt = rcu_dereference(vrf->rt6);
		if (likely(rt)) {
			dst = &rt->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();
	}

	return dst;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_get_rtable	= vrf_get_rtable,
	.l3mdev_get_saddr	= vrf_get_saddr,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
#endif
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
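
/* Usage note: with get_drvinfo wired up, "ethtool -i <vrf-device>"
 * reports DRV_NAME and DRV_VERSION (one ETHTOOL_GDRVINFO consumer,
 * given for illustration).
 */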
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	return register_netdevice(dev);
}
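
/* The IFLA_VRF_TABLE attribute consumed above is what iproute2 sends
 * for "ip link add <name> type vrf table <id>" (illustrative userspace
 * usage; see vrf_nl_policy below for the attribute type).
 */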
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_TABLE */
}
static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}
static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}
static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}
module_init(vrf_init_module);

MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);