/*
 * Lightweight Autonomic Network Architecture
 *
 * Ethernet vlink layer. This module allows one to operate virtual LANA
 * Ethernet devices, which are configurable via ifconfig et al. and bound
 * to a real underlying device. Similar to VLANs, multiple virtual devices
 * can be bound to one real network device. Multiplexing and demultiplexing
 * happens via a 10 bit tag carried in the Ethernet type field (ETH_P_LANA).
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss Federal Institute of Technology (ETH Zurich)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/list.h>
#include <linux/u64_stats_sync.h>
#include <linux/seqlock.h>
#include <net/rtnetlink.h>

#include "xt_engine.h"
#include "xt_fblock.h"
#define IFF_VLINK_MAS	0x20000		/* Master device */
#define IFF_VLINK_DEV	0x40000		/* Slave device */
#define IFF_IS_BRIDGED	0x60000

/* Ethernet LANA packet with 10 bit tag ID */
#define ETH_P_LANA	0xAC00
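
/*
 * Illustration only, not used by the driver itself: the demux tag lives in
 * the low 10 bits of the Ethernet type field, while the ETH_P_LANA bits mark
 * the frame as LANA traffic. These hypothetical helpers sketch, under that
 * assumption, how such a type value is built and taken apart again; the
 * extraction mirrors the masking done in the rx handler further below.
 */
static inline __be16 fb_ethvlink_example_mk_proto(u16 tag)
{
	/* fold the 10 bit tag into the LANA type prefix */
	return htons(ETH_P_LANA | (tag & 0x03ff));
}

static inline u16 fb_ethvlink_example_tag(__be16 proto)
{
	/* strip the LANA type prefix, leaving the 10 bit tag */
	return ntohs(proto & ~__constant_htons(ETH_P_LANA));
}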
struct pcpu_dstats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_dropped;
	struct u64_stats_sync syncp;
};
static struct net_device_ops fb_ethvlink_netdev_ops __read_mostly;
static struct rtnl_link_ops fb_ethvlink_rtnl_ops __read_mostly;
static struct ethtool_ops fb_ethvlink_ethtool_ops __read_mostly;
static struct header_ops fb_ethvlink_header_ops __read_mostly;

static LIST_HEAD(fb_ethvlink_vdevs);
static DEFINE_SPINLOCK(fb_ethvlink_vdevs_lock);
struct fb_ethvlink_private;

struct fb_ethvlink_private_inner {
	idp_t port[2];			/* bound IDPs, indexed by path_type */
	seqlock_t lock;
	struct fb_ethvlink_private *vdev;
};

struct fb_ethvlink_private {
	u16 tag;			/* 10 bit demux tag of this vdev */
	struct list_head list;
	struct net_device *self;
	struct net_device *real_dev;
	int (*netvif_rx)(struct sk_buff *skb, struct fb_ethvlink_private *vdev);
	struct fblock *fb;
};
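
/*
 * Two data paths meet in this driver. Ingress: ETH_P_LANA frames arrive on
 * the real device, fb_ethvlink_handle_frame() demultiplexes them by tag and
 * hands them via netvif_rx() into LANA packet processing. Egress: the
 * functional block receives packets in fb_ethvlink_netrx(), points them at
 * the virtual device, and fb_ethvlink_start_xmit() forwards them through
 * the underlying real device.
 */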
static int fb_ethvlink_init(struct net_device *dev)
{
	dev->dstats = alloc_percpu(struct pcpu_dstats);
	if (!dev->dstats)
		return -ENOMEM;

	return 0;
}

static void fb_ethvlink_uninit(struct net_device *dev)
{
	free_percpu(dev->dstats);
}
static int fb_ethvlink_open(struct net_device *dev)
{
	struct fb_ethvlink_private *dev_priv = netdev_priv(dev);

	netif_start_queue(dev);
	if (netif_carrier_ok(dev_priv->real_dev)) {
		netif_tx_lock_bh(dev);
		netif_carrier_on(dev);
		netif_tx_unlock_bh(dev);
	}

	return 0;
}
static int fb_ethvlink_stop(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_carrier_off(dev);
	netif_tx_unlock_bh(dev);
	netif_stop_queue(dev);

	return 0;
}
static inline int fb_eth_dev_is_bridged(struct net_device *dev)
{
	return (dev->priv_flags & IFF_IS_BRIDGED) == IFF_IS_BRIDGED;
}

static inline int fb_ethvlink_real_dev_is_hooked(struct net_device *dev)
{
	return (dev->priv_flags & IFF_VLINK_MAS) == IFF_VLINK_MAS;
}
static inline void fb_ethvlink_make_real_dev_hooked(struct net_device *dev)
{
	dev->priv_flags |= IFF_VLINK_MAS;
}

static inline void fb_ethvlink_make_real_dev_unhooked(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_VLINK_MAS;
}
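
/*
 * fb_ethvlink_event() handles functional block control messages: on
 * FBLOCK_BIND_IDP the per-CPU port table is filled with the IDP of the
 * bound peer for the given direction, on FBLOCK_UNBIND_IDP it is reset to
 * IDP_UNKNOWN. Updates take the per-CPU seqlock so that the receive path
 * can read the table without locking.
 */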
static int fb_ethvlink_event(struct notifier_block *self, unsigned long cmd,
			     void *args)
{
	int ret = NOTIFY_OK;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_ethvlink_private_inner __percpu *fb_priv;

	fb = rcu_dereference_raw(container_of(self, struct fblock_notifier,
					      nb)->self);
	fb_priv = (struct fb_ethvlink_private_inner __percpu *)
		  rcu_dereference_raw(fb->private_data);

	switch (cmd) {
	case FBLOCK_BIND_IDP: {
		int bound = 0;
		struct fblock_bind_msg *msg = args;

		for_each_online_cpu(cpu) {
			struct fb_ethvlink_private_inner *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
				write_seqlock(&fb_priv_cpu->lock);
				fb_priv_cpu->port[msg->dir] = msg->idp;
				write_sequnlock(&fb_priv_cpu->lock);
				bound = 1;
			} else {
				ret = NOTIFY_BAD;
				break;
			}
		}

		if (bound)
			printk(KERN_INFO "[%s::vlink] port %s bound to IDP%u\n",
			       fb->name, path_names[msg->dir], msg->idp);
		} break;
	case FBLOCK_UNBIND_IDP: {
		int unbound = 0;
		struct fblock_bind_msg *msg = args;

		for_each_online_cpu(cpu) {
			struct fb_ethvlink_private_inner *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			if (fb_priv_cpu->port[msg->dir] == msg->idp) {
				write_seqlock(&fb_priv_cpu->lock);
				fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
				write_sequnlock(&fb_priv_cpu->lock);
				unbound = 1;
			} else {
				ret = NOTIFY_BAD;
				break;
			}
		}

		if (unbound)
			printk(KERN_INFO "[%s::vlink] port %s unbound\n",
			       fb->name, path_names[msg->dir]);
		} break;
	default:
		break;
	}

	return ret;
}
static int fb_ethvlink_queue_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct fb_ethvlink_private *dev_priv = netdev_priv(dev);

	skb_set_dev(skb, dev_priv->real_dev);
	return dev_queue_xmit(skb);
}
static netdev_tx_t fb_ethvlink_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	int ret;
	struct pcpu_dstats *dstats;

	dstats = this_cpu_ptr(dev->dstats);
	ret = fb_ethvlink_queue_xmit(skb, dev);
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_packets++;
		dstats->tx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);
	} else
		this_cpu_inc(dstats->tx_dropped);

	return ret;
}
static int fb_ethvlink_netrx(const struct fblock * const fb,
			     struct sk_buff * const skb,
			     enum path_type * const dir)
{
	struct fb_ethvlink_private_inner __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference(fb->private_data));
	skb->dev = fb_priv_cpu->vdev->self;
	write_next_idp_to_skb(skb, fb->idp, IDP_UNKNOWN);
	/* Assumed completion: hand the frame to the virtual device's xmit
	 * path, which forwards it via the underlying real device. */
	dev_queue_xmit(skb);

	return PPE_DROPPED;
}
static int fb_ethvlink_handle_frame_virt(struct sk_buff *skb,
					 struct fb_ethvlink_private *vdev)
{
	unsigned int seq;
	struct fb_ethvlink_private_inner __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference(vdev->fb->private_data));
	if (fb_priv_cpu->port[TYPE_INGRESS] == IDP_UNKNOWN)
		goto drop;

	do {
		seq = read_seqbegin(&fb_priv_cpu->lock);
		write_next_idp_to_skb(skb, vdev->fb->idp,
				      fb_priv_cpu->port[TYPE_INGRESS]);
	} while (read_seqretry(&fb_priv_cpu->lock, seq));

	process_packet(skb, TYPE_INGRESS);

	return NET_RX_SUCCESS;
drop:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
static rx_handler_result_t fb_ethvlink_handle_frame(struct sk_buff **pskb)
{
	int ret, bypass_drop = 0;
	u16 vtag;
	struct sk_buff *skb = *pskb;
	struct net_device *dev;
	struct fb_ethvlink_private *vdev;
	struct pcpu_dstats *dstats;

	dev = skb->dev;
	if (unlikely((dev->flags & IFF_UP) != IFF_UP))
		goto drop;
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;
	if (unlikely(!is_valid_ether_addr(eth_hdr(skb)->h_source)))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	if ((eth_hdr(skb)->h_proto & __constant_htons(ETH_P_LANA)) !=
	    __constant_htons(ETH_P_LANA))
		return RX_HANDLER_PASS;

	vtag = ntohs(eth_hdr(skb)->h_proto &
		     ~__constant_htons(ETH_P_LANA));

	list_for_each_entry_rcu(vdev, &fb_ethvlink_vdevs, list) {
		if (vtag == vdev->tag && dev == vdev->real_dev) {
			dstats = this_cpu_ptr(vdev->self->dstats);
			ret = vdev->netvif_rx(skb, vdev);
			bypass_drop = 1;
			if (ret == NET_RX_SUCCESS) {
				u64_stats_update_begin(&dstats->syncp);
				dstats->rx_packets++;
				dstats->rx_bytes += skb->len;
				u64_stats_update_end(&dstats->syncp);
			} else
				this_cpu_inc(dstats->rx_errors);
			break;
		}
	}

drop:
	if (!bypass_drop)
		kfree_skb(skb);

	return RX_HANDLER_CONSUMED;
}
static void fb_ethvlink_ethtool_get_drvinfo(struct net_device *dev,
					    struct ethtool_drvinfo *drvinfo)
{
	snprintf(drvinfo->driver, sizeof(drvinfo->driver), "ethvlink");
	snprintf(drvinfo->version, sizeof(drvinfo->version), "0.1");
}
static u32 fb_ethvlink_ethtool_get_rx_csum(struct net_device *dev)
{
	const struct fb_ethvlink_private *vdev = netdev_priv(dev);

	return dev_ethtool_get_rx_csum(vdev->real_dev);
}
static int fb_ethvlink_ethtool_get_settings(struct net_device *dev,
					    struct ethtool_cmd *cmd)
{
	const struct fb_ethvlink_private *vdev = netdev_priv(dev);

	return dev_ethtool_get_settings(vdev->real_dev, cmd);
}
static u32 fb_ethvlink_ethtool_get_flags(struct net_device *dev)
{
	const struct fb_ethvlink_private *vdev = netdev_priv(dev);

	return dev_ethtool_get_flags(vdev->real_dev);
}
static void fb_ethvlink_dev_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->ethtool_ops = &fb_ethvlink_ethtool_ops;
	dev->netdev_ops = &fb_ethvlink_netdev_ops;
	dev->rtnl_link_ops = &fb_ethvlink_rtnl_ops;
	dev->header_ops = &fb_ethvlink_header_ops;
	dev->tx_queue_len = 0;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	dev->destructor = free_netdev;

	random_ether_addr(dev->dev_addr);
	memset(dev->broadcast, 0, sizeof(dev->broadcast));
}
static int fb_ethvlink_validate(struct nlattr **tb, struct nlattr **data)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	return 0;
}
static int fb_ethvlink_create_header(struct sk_buff *skb,
				     struct net_device *dev,
				     unsigned short type, const void *daddr,
				     const void *saddr, unsigned len)
{
	const struct fb_ethvlink_private *vdev = netdev_priv(dev);

	return dev_hard_header(skb, vdev->real_dev, type, daddr,
			       saddr ? : dev->dev_addr, len);
}
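
/*
 * Statistics are kept per CPU in struct pcpu_dstats and folded on demand:
 * writers in the xmit/rx paths bracket their updates with
 * u64_stats_update_begin/_end, while the reader below retries via
 * u64_stats_fetch_begin/_retry until it obtains a consistent 64 bit
 * snapshot of each CPU's counters.
 */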
static struct rtnl_link_stats64 *
fb_ethvlink_get_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpackets, rbytes, rpackets;
		unsigned int start;
		const struct pcpu_dstats *dstats;

		dstats = per_cpu_ptr(dev->dstats, i);

		do {
			start = u64_stats_fetch_begin(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpackets = dstats->tx_packets;
			rbytes = dstats->rx_bytes;
			rpackets = dstats->rx_packets;
		} while (u64_stats_fetch_retry(&dstats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpackets;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpackets;
	}

	return stats;
}
static void fb_ethvlink_destroy_fblock(struct fblock *fb)
{
	unregister_fblock_namespace_no_rcu(fb);
	free_percpu(rcu_dereference_raw(fb->private_data));
	/* kfree_fblock() assumed as the release counterpart of alloc_fblock() */
	kfree_fblock(fb);
	module_put(THIS_MODULE);
}
static struct fblock *fb_ethvlink_build_fblock(struct fb_ethvlink_private *vdev)
{
	int ret, cpu;
	struct fblock *fb;
	struct fb_ethvlink_private_inner __percpu *fb_priv;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_ethvlink_private_inner);
	if (!fb_priv)
		goto err;

	for_each_online_cpu(cpu) {
		struct fb_ethvlink_private_inner *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->vdev = vdev;
	}

	ret = init_fblock(fb, vdev->self->name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_ethvlink_netrx;
	fb->event_rx = fb_ethvlink_event;

	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;

	__module_get(THIS_MODULE);
	return fb;

err3:
	cleanup_fblock_ctor(fb);
err2:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}
static int fb_ethvlink_add_dev(struct vlinknlmsg *vhdr,
			       struct nlmsghdr *nlh)
{
	int ret;
	unsigned long flags;
	struct net_device *dev;
	struct net_device *root;
	struct fb_ethvlink_private *dev_priv, *vdev;

	if (vhdr->cmd != VLINKNLCMD_ADD_DEVICE)
		return NETLINK_VLINK_RX_NXT;

	root = dev_get_by_name(&init_net, vhdr->virt_name);
	if (root) {
		/* virtual device name already taken */
		dev_put(root);
		return NETLINK_VLINK_RX_EMERG;
	}

	root = dev_get_by_name(&init_net, vhdr->real_name);
	if (root && (root->priv_flags & IFF_VLINK_DEV) == IFF_VLINK_DEV)
		goto err_put;
	if (!root)
		return NETLINK_VLINK_RX_EMERG;

	rcu_read_lock();
	list_for_each_entry_rcu(vdev, &fb_ethvlink_vdevs, list) {
		if (vdev->tag == vhdr->port) {
			/* tag already in use on some carrier */
			rcu_read_unlock();
			goto err_put;
		}
	}
	rcu_read_unlock();

	dev = alloc_netdev(sizeof(*dev_priv), vhdr->virt_name,
			   fb_ethvlink_dev_setup);
	if (!dev)
		goto err_put;

	ret = dev_alloc_name(dev, dev->name);
	if (ret)
		goto err_free;

	ret = register_netdev(dev);
	if (ret)
		goto err_free;

	dev_priv = netdev_priv(dev);
	dev->priv_flags |= vhdr->flags;
	dev->priv_flags |= IFF_VLINK_DEV;
	dev_priv->tag = vhdr->port;
	dev_priv->self = dev;
	dev_priv->real_dev = root;
	dev_priv->netvif_rx = fb_ethvlink_handle_frame_virt;
	dev_priv->fb = fb_ethvlink_build_fblock(dev_priv);
	if (!dev_priv->fb)
		goto err_unreg;

	netif_stacked_transfer_operstate(dev_priv->real_dev, dev);
	dev_put(dev_priv->real_dev);

	spin_lock_irqsave(&fb_ethvlink_vdevs_lock, flags);
	list_add_rcu(&dev_priv->list, &fb_ethvlink_vdevs);
	spin_unlock_irqrestore(&fb_ethvlink_vdevs_lock, flags);

	netif_tx_lock_bh(dev);
	netif_carrier_off(dev);
	netif_tx_unlock_bh(dev);

	printk(KERN_INFO "[lana] %s stacked on carrier %s:%u\n",
	       vhdr->virt_name, vhdr->real_name, dev_priv->tag);
	return NETLINK_VLINK_RX_STOP;

err_unreg:
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
	goto err_put;
err_free:
	free_netdev(dev);
err_put:
	dev_put(root);
	return NETLINK_VLINK_RX_EMERG;
}
static int fb_ethvlink_start_hook_dev(struct vlinknlmsg *vhdr,
				      struct nlmsghdr *nlh)
{
	int ret;
	struct net_device *root;

	if (vhdr->cmd != VLINKNLCMD_START_HOOK_DEVICE)
		return NETLINK_VLINK_RX_NXT;

	root = dev_get_by_name(&init_net, vhdr->real_name);
	if (root && (root->priv_flags & IFF_VLINK_DEV) == IFF_VLINK_DEV)
		goto err_put;
	if (!root)
		return NETLINK_VLINK_RX_EMERG;

	if (fb_eth_dev_is_bridged(root))
		goto err_put;
	if (fb_ethvlink_real_dev_is_hooked(root))
		goto out;

	rtnl_lock();
	ret = netdev_rx_handler_register(root, fb_ethvlink_handle_frame,
					 NULL);
	rtnl_unlock();
	if (ret)
		goto err_put;

	fb_ethvlink_make_real_dev_hooked(root);
	printk(KERN_INFO "[lana] hook attached to carrier %s\n",
	       vhdr->real_name);
out:
	dev_put(root);
	return NETLINK_VLINK_RX_STOP;
err_put:
	dev_put(root);
	return NETLINK_VLINK_RX_EMERG;
}
static int fb_ethvlink_stop_hook_dev(struct vlinknlmsg *vhdr,
				     struct nlmsghdr *nlh)
{
	struct net_device *root;

	if (vhdr->cmd != VLINKNLCMD_STOP_HOOK_DEVICE)
		return NETLINK_VLINK_RX_NXT;

	root = dev_get_by_name(&init_net, vhdr->real_name);
	if (root && (root->priv_flags & IFF_VLINK_DEV) == IFF_VLINK_DEV)
		goto err_put;
	if (!root)
		return NETLINK_VLINK_RX_EMERG;

	if (!fb_ethvlink_real_dev_is_hooked(root))
		goto err_put;

	rtnl_lock();
	netdev_rx_handler_unregister(root);
	rtnl_unlock();

	fb_ethvlink_make_real_dev_unhooked(root);
	printk(KERN_INFO "[lana] hook detached from carrier %s\n",
	       vhdr->real_name);

	dev_put(root);
	return NETLINK_VLINK_RX_STOP;
err_put:
	dev_put(root);
	return NETLINK_VLINK_RX_EMERG;
}
static void fb_ethvlink_rm_dev_common(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_carrier_off(dev);
	netif_tx_unlock_bh(dev);

	printk(KERN_INFO "[lana] %s unregistered\n", dev->name);

	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
static int fb_ethvlink_rm_dev(struct vlinknlmsg *vhdr, struct nlmsghdr *nlh)
{
	int count;
	unsigned long flags;
	struct fb_ethvlink_private *dev_priv, *vdev;
	struct net_device *dev;

	if (vhdr->cmd != VLINKNLCMD_RM_DEVICE)
		return NETLINK_VLINK_RX_NXT;

	dev = dev_get_by_name(&init_net, vhdr->virt_name);
	if (!dev)
		return NETLINK_VLINK_RX_EMERG;
	if ((dev->priv_flags & IFF_VLINK_DEV) != IFF_VLINK_DEV)
		goto err_put;
	if ((dev->flags & IFF_RUNNING) == IFF_RUNNING)
		goto err_put;

	dev_priv = netdev_priv(dev);
	if (atomic_read(&dev_priv->fb->refcnt) > 2) {
		printk(KERN_INFO "Cannot remove vlink dev! Still in use by "
		       "others!\n");
		goto err_put;
	}

	count = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(vdev, &fb_ethvlink_vdevs, list)
		if (dev_priv->real_dev == vdev->real_dev)
			count++;
	rcu_read_unlock();

	if (count == 1) {
		/* We're last client on carrier! */
		if (fb_ethvlink_real_dev_is_hooked(dev_priv->real_dev)) {
			rtnl_lock();
			netdev_rx_handler_unregister(dev_priv->real_dev);
			rtnl_unlock();

			fb_ethvlink_make_real_dev_unhooked(dev_priv->real_dev);
			printk(KERN_INFO "[lana] hook detached from %s\n",
			       dev_priv->real_dev->name);
		}
	}

	spin_lock_irqsave(&fb_ethvlink_vdevs_lock, flags);
	list_del_rcu(&dev_priv->list);
	spin_unlock_irqrestore(&fb_ethvlink_vdevs_lock, flags);

	fb_ethvlink_destroy_fblock(dev_priv->fb);
	fb_ethvlink_rm_dev_common(dev);

	dev_put(dev);
	return NETLINK_VLINK_RX_STOP;

err_put:
	dev_put(dev);
	return NETLINK_VLINK_RX_EMERG;
}
static int fb_ethvlink_dev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	unsigned long flags;
	struct net_device *dev = ptr;
	struct fb_ethvlink_private *vdev;
	struct vlinknlmsg vhdr;

	switch (event) {
	case NETDEV_CHANGE:
		rcu_read_lock();
		list_for_each_entry_rcu(vdev, &fb_ethvlink_vdevs, list)
			if (vdev->real_dev == dev)
				netif_stacked_transfer_operstate(vdev->real_dev,
								 vdev->self);
		rcu_read_unlock();
		break;
	case NETDEV_FEAT_CHANGE:
		/* Nothing right now */
		break;
	case NETDEV_UNREGISTER:
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;
		memset(&vhdr, 0, sizeof(vhdr));
		vhdr.cmd = VLINKNLCMD_RM_DEVICE;
		spin_lock_irqsave(&fb_ethvlink_vdevs_lock, flags);
		list_for_each_entry_rcu(vdev, &fb_ethvlink_vdevs, list) {
			if (vdev->real_dev == dev) {
				memset(vhdr.virt_name, 0,
				       sizeof(vhdr.virt_name));
				strlcpy(vhdr.virt_name, vdev->self->name,
					sizeof(vhdr.virt_name));
				fb_ethvlink_rm_dev(&vhdr, NULL);
			}
		}
		spin_unlock_irqrestore(&fb_ethvlink_vdevs_lock, flags);
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		return NOTIFY_BAD;
	default:
		break;
	}

	return NOTIFY_DONE;
}
static struct ethtool_ops fb_ethvlink_ethtool_ops __read_mostly = {
	.get_link            = ethtool_op_get_link,
	.get_settings        = fb_ethvlink_ethtool_get_settings,
	.get_rx_csum         = fb_ethvlink_ethtool_get_rx_csum,
	.get_drvinfo         = fb_ethvlink_ethtool_get_drvinfo,
	.get_flags           = fb_ethvlink_ethtool_get_flags,
};
static struct net_device_ops fb_ethvlink_netdev_ops __read_mostly = {
	.ndo_init            = fb_ethvlink_init,
	.ndo_uninit          = fb_ethvlink_uninit,
	.ndo_open            = fb_ethvlink_open,
	.ndo_stop            = fb_ethvlink_stop,
	.ndo_start_xmit      = fb_ethvlink_start_xmit,
	.ndo_get_stats64     = fb_ethvlink_get_stats64,
	.ndo_change_mtu      = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};
static struct header_ops fb_ethvlink_header_ops __read_mostly = {
	.create          = fb_ethvlink_create_header,
	.rebuild         = eth_rebuild_header,
	.parse           = eth_header_parse,
	.cache           = eth_header_cache,
	.cache_update    = eth_header_cache_update,
};
static struct rtnl_link_ops fb_ethvlink_rtnl_ops __read_mostly = {
	.kind       = "lana",
	.priv_size  = sizeof(struct fb_ethvlink_private),
	.setup      = fb_ethvlink_dev_setup,
	.validate   = fb_ethvlink_validate,
};
static struct vlink_subsys fb_ethvlink_sys __read_mostly = {
	.name   = "eth-tagged",
	.owner  = THIS_MODULE,
	.type   = VLINKNLGRP_ETHERNET,
	.rwsem  = __RWSEM_INITIALIZER(fb_ethvlink_sys.rwsem),
};
static struct notifier_block fb_ethvlink_notifier_block __read_mostly = {
	.notifier_call = fb_ethvlink_dev_event,
};
static struct vlink_callback fb_ethvlink_add_dev_cb =
	VLINK_CALLBACK_INIT(fb_ethvlink_add_dev, NETLINK_VLINK_PRIO_NORM);
static struct vlink_callback fb_ethvlink_rm_dev_cb =
	VLINK_CALLBACK_INIT(fb_ethvlink_rm_dev, NETLINK_VLINK_PRIO_NORM);
static struct vlink_callback fb_ethvlink_start_hook_dev_cb =
	VLINK_CALLBACK_INIT(fb_ethvlink_start_hook_dev, NETLINK_VLINK_PRIO_HIGH);
static struct vlink_callback fb_ethvlink_stop_hook_dev_cb =
	VLINK_CALLBACK_INIT(fb_ethvlink_stop_hook_dev, NETLINK_VLINK_PRIO_HIGH);
static int __init init_fb_ethvlink_module(void)
{
	int ret;

	ret = vlink_subsys_register(&fb_ethvlink_sys);
	if (ret)
		return ret;

	vlink_add_callback(&fb_ethvlink_sys, &fb_ethvlink_add_dev_cb);
	vlink_add_callback(&fb_ethvlink_sys, &fb_ethvlink_rm_dev_cb);
	vlink_add_callback(&fb_ethvlink_sys, &fb_ethvlink_start_hook_dev_cb);
	vlink_add_callback(&fb_ethvlink_sys, &fb_ethvlink_stop_hook_dev_cb);

	ret = rtnl_link_register(&fb_ethvlink_rtnl_ops);
	if (ret)
		goto err;

	register_netdevice_notifier(&fb_ethvlink_notifier_block);

	printk(KERN_INFO "[lana] Ethernet tagged vlink layer loaded!\n");
	return 0;

err:
	vlink_subsys_unregister_batch(&fb_ethvlink_sys);
	return ret;
}
static void __exit cleanup_fb_ethvlink_module(void)
{
	struct fb_ethvlink_private *vdev;

	list_for_each_entry_rcu(vdev, &fb_ethvlink_vdevs, list) {
		if (fb_ethvlink_real_dev_is_hooked(vdev->real_dev)) {
			rtnl_lock();
			netdev_rx_handler_unregister(vdev->real_dev);
			rtnl_unlock();

			fb_ethvlink_make_real_dev_unhooked(vdev->real_dev);
			printk(KERN_INFO "[lana] hook detached from %s\n",
			       vdev->real_dev->name);
		}
		fb_ethvlink_rm_dev_common(vdev->self);
	}

	unregister_netdevice_notifier(&fb_ethvlink_notifier_block);
	rtnl_link_unregister(&fb_ethvlink_rtnl_ops);
	vlink_subsys_unregister_batch(&fb_ethvlink_sys);

	printk(KERN_INFO "[lana] Ethernet tagged vlink layer removed!\n");
}
module_init(init_fb_ethvlink_module);
module_exit(cleanup_fb_ethvlink_module);

MODULE_ALIAS_RTNL_LINK("lana");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
MODULE_DESCRIPTION("Ethernet tagged virtual link layer driver");