/*
 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * The code this is based on carried the following copyright notice:
 * ---
 * (C) Copyright 2001-2006
 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
 * Re-worked by Ben Greear <greearb@candelatech.com>
 * ---
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>

#define MACVLAN_HASH_SIZE	(1 << BITS_PER_BYTE)
#define MACVLAN_BC_QUEUE_LEN	1000
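
/* Per-lower-device state: every macvlan created on top of the same lower
 * device shares one macvlan_port, which holds the hash table of macvlans
 * keyed by MAC address, the list of all macvlans on the port and the
 * deferred broadcast queue drained from a workqueue.
 */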
struct macvlan_port {
	struct net_device	*dev;
	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
	struct list_head	vlans;
	struct rcu_head		rcu;
	struct sk_buff_head	bc_queue;
	struct work_struct	bc_work;
	bool			passthru;
	int			count;
};

struct macvlan_skb_cb {
	const struct macvlan_dev *src;
};

#define MACVLAN_SKB_CB(__skb)	((struct macvlan_skb_cb *)&((__skb)->cb[0]))

static void macvlan_port_destroy(struct net_device *dev);

static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
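
/* The unicast hash is indexed by the last octet of the MAC address;
 * collisions within a bucket are resolved by a full address compare.
 */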
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
					       const unsigned char *addr)
{
	struct macvlan_dev *vlan;

	hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
		if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
			return vlan;
	}
	return NULL;
}

static void macvlan_hash_add(struct macvlan_dev *vlan)
{
	struct macvlan_port *port = vlan->port;
	const unsigned char *addr = vlan->dev->dev_addr;

	hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]);
}

static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync)
{
	hlist_del_rcu(&vlan->hlist);
	if (sync)
		synchronize_rcu();
}

static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
				     const unsigned char *addr)
{
	macvlan_hash_del(vlan, true);
	/* Now that we are unhashed it is safe to change the device
	 * address without confusing packet delivery.
	 */
	memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
	macvlan_hash_add(vlan);
}
static int macvlan_addr_busy(const struct macvlan_port *port,
			     const unsigned char *addr)
{
	/* Test to see if the specified address is
	 * currently in use by the underlying device or
	 * another macvlan.
	 */
	if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
		return 1;

	if (macvlan_hash_lookup(port, addr))
		return 1;

	return 0;
}
static int macvlan_broadcast_one(struct sk_buff *skb,
				 const struct macvlan_dev *vlan,
				 const struct ethhdr *eth, bool local)
{
	struct net_device *dev = vlan->dev;

	if (local)
		return __dev_forward_skb(dev, skb);

	skb->dev = dev;
	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	return 0;
}
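
/* Mix the macvlan pointer into the multicast filter hash so that the
 * same group address maps to different filter bits on different
 * macvlan instances.
 */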
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
{
	return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
}

static unsigned int mc_hash(const struct macvlan_dev *vlan,
			    const unsigned char *addr)
{
	u32 val = __get_unaligned_cpu32(addr + 2);

	val ^= macvlan_hash_mix(vlan);
	return hash_32(val, MACVLAN_MC_FILTER_BITS);
}

static void macvlan_broadcast(struct sk_buff *skb,
			      const struct macvlan_port *port,
			      struct net_device *src,
			      enum macvlan_mode mode)
{
	const struct ethhdr *eth = eth_hdr(skb);
	const struct macvlan_dev *vlan;
	struct sk_buff *nskb;
	unsigned int i;
	int err;
	unsigned int hash;

	if (skb->protocol == htons(ETH_P_PAUSE))
		return;

	for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
			if (vlan->dev == src || !(vlan->mode & mode))
				continue;

			hash = mc_hash(vlan, eth->h_dest);
			if (!test_bit(hash, vlan->mc_filter))
				continue;

			err = NET_RX_DROP;
			nskb = skb_clone(skb, GFP_ATOMIC);
			if (likely(nskb))
				err = macvlan_broadcast_one(
					nskb, vlan, eth,
					mode == MACVLAN_MODE_BRIDGE) ?:
				      netif_rx_ni(nskb);
			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
					 err == NET_RX_SUCCESS, 1);
		}
	}
}
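
/* Deferred broadcast/multicast delivery: skbs queued by the rx handler on
 * port->bc_queue are replicated here from workqueue context, with the set
 * of receiving macvlans chosen by the mode of the source.
 */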
static void macvlan_process_broadcast(struct work_struct *w)
{
	struct macvlan_port *port = container_of(w, struct macvlan_port,
						 bc_work);
	struct sk_buff *skb;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->bc_queue.lock);
	skb_queue_splice_tail_init(&port->bc_queue, &list);
	spin_unlock_bh(&port->bc_queue.lock);

	while ((skb = __skb_dequeue(&list))) {
		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;

		rcu_read_lock();

		if (!src)
			/* frame comes from an external address */
			macvlan_broadcast(skb, port, NULL,
					  MACVLAN_MODE_PRIVATE |
					  MACVLAN_MODE_VEPA    |
					  MACVLAN_MODE_PASSTHRU|
					  MACVLAN_MODE_BRIDGE);
		else if (src->mode == MACVLAN_MODE_VEPA)
			/* flood to everyone except source */
			macvlan_broadcast(skb, port, src->dev,
					  MACVLAN_MODE_VEPA |
					  MACVLAN_MODE_BRIDGE);
		else
			/*
			 * flood only to VEPA ports, bridge ports
			 * already saw the frame on the way out.
			 */
			macvlan_broadcast(skb, port, src->dev,
					  MACVLAN_MODE_VEPA);

		rcu_read_unlock();

		kfree_skb(skb);
	}
}
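
/* Clone the skb onto the port's broadcast queue so the expensive
 * replication happens outside the receive path; if the queue is full the
 * clone is dropped and the lower device's rx_dropped counter is charged.
 */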
static void macvlan_broadcast_enqueue(struct macvlan_port *port,
				      struct sk_buff *skb)
{
	struct sk_buff *nskb;
	int err = -ENOMEM;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		goto err;

	spin_lock(&port->bc_queue.lock);
	if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
		__skb_queue_tail(&port->bc_queue, nskb);
		err = 0;
	}
	spin_unlock(&port->bc_queue.lock);

	if (err)
		goto free_nskb;

	schedule_work(&port->bc_work);
	return;

free_nskb:
	kfree_skb(nskb);
err:
	atomic_long_inc(&skb->dev->rx_dropped);
}

/* called under rcu_read_lock() from netif_receive_skb */
static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
{
	struct macvlan_port *port;
	struct sk_buff *skb = *pskb;
	const struct ethhdr *eth = eth_hdr(skb);
	const struct macvlan_dev *vlan;
	const struct macvlan_dev *src;
	struct net_device *dev;
	unsigned int len = 0;
	int ret = NET_RX_DROP;

	port = macvlan_port_get_rcu(skb->dev);
	if (is_multicast_ether_addr(eth->h_dest)) {
		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
		if (!skb)
			return RX_HANDLER_CONSUMED;
		*pskb = skb;
		eth = eth_hdr(skb);
		src = macvlan_hash_lookup(port, eth->h_source);
		if (src && src->mode != MACVLAN_MODE_VEPA &&
		    src->mode != MACVLAN_MODE_BRIDGE) {
			/* forward to original port. */
			vlan = src;
			ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
			      netif_rx(skb);
			goto out;
		}

		MACVLAN_SKB_CB(skb)->src = src;
		macvlan_broadcast_enqueue(port, skb);

		return RX_HANDLER_PASS;
	}

	if (port->passthru)
		vlan = list_first_or_null_rcu(&port->vlans,
					      struct macvlan_dev, list);
	else
		vlan = macvlan_hash_lookup(port, eth->h_dest);
	if (vlan == NULL)
		return RX_HANDLER_PASS;

	dev = vlan->dev;
	if (unlikely(!(dev->flags & IFF_UP))) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}
	len = skb->len + ETH_HLEN;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	*pskb = skb;
	skb->dev = dev;
	skb->pkt_type = PACKET_HOST;

	ret = netif_rx(skb);

out:
	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
	return RX_HANDLER_CONSUMED;
}
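
/* Transmit path. In bridge mode, multicast frames are replicated to the
 * other bridge ports and also sent out through the lower device, while
 * unicast frames addressed to another bridge-mode macvlan are looped back
 * via the lower device's receive path. Everything else goes straight out
 * through the lower device.
 */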
static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);
	const struct macvlan_port *port = vlan->port;
	const struct macvlan_dev *dest;

	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
		const struct ethhdr *eth = (void *)skb->data;

		/* send to other bridge ports directly */
		if (is_multicast_ether_addr(eth->h_dest)) {
			macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
			goto xmit_world;
		}

		dest = macvlan_hash_lookup(port, eth->h_dest);
		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
			/* send to lowerdev first for its network taps */
			dev_forward_skb(vlan->lowerdev, skb);

			return NET_XMIT_SUCCESS;
		}
	}

xmit_world:
	skb->dev = vlan->lowerdev;
	return dev_queue_xmit(skb);
}

static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	if (vlan->netpoll)
		netpoll_send_skb(vlan->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}

static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	unsigned int len = skb->len;
	int ret;
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (unlikely(netpoll_tx_running(dev)))
		return macvlan_netpoll_send_skb(vlan, skb);

	if (vlan->fwd_priv) {
		skb->dev = vlan->lowerdev;
		ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
	} else {
		ret = macvlan_queue_xmit(skb, dev);
	}

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct vlan_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	}
	return ret;
}
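
/* Build the hard header using the lower device's header_ops, defaulting
 * the source address to this macvlan's own MAC when none is given.
 */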
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
			       unsigned short type, const void *daddr,
			       const void *saddr, unsigned len)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	return dev_hard_header(skb, lowerdev, type, daddr,
			       saddr ? : dev->dev_addr, len);
}

static const struct header_ops macvlan_hard_header_ops = {
	.create		= macvlan_hard_header,
	.rebuild	= eth_rebuild_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};

static struct rtnl_link_ops macvlan_link_ops;

static int macvlan_open(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	int err;

	if (vlan->port->passthru) {
		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
			err = dev_set_promiscuity(lowerdev, 1);
			if (err < 0)
				goto out;
		}
		goto hash_add;
	}

	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
	    dev->rtnl_link_ops == &macvlan_link_ops) {
		vlan->fwd_priv =
		      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);

		/* If we get a NULL pointer back, or if we get an error
		 * then we should just fall through to the non accelerated path
		 */
		if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
			vlan->fwd_priv = NULL;
		} else
			return 0;
	}

	err = -EBUSY;
	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
		goto out;

	err = dev_uc_add(lowerdev, dev->dev_addr);
	if (err < 0)
		goto out;
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(lowerdev, 1);
		if (err < 0)
			goto del_unicast;
	}

hash_add:
	macvlan_hash_add(vlan);
	return 0;

del_unicast:
	dev_uc_del(lowerdev, dev->dev_addr);
out:
	if (vlan->fwd_priv) {
		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
							   vlan->fwd_priv);
		vlan->fwd_priv = NULL;
	}
	return err;
}

static int macvlan_stop(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	if (vlan->fwd_priv) {
		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
							   vlan->fwd_priv);
		vlan->fwd_priv = NULL;
		return 0;
	}

	dev_uc_unsync(lowerdev, dev);
	dev_mc_unsync(lowerdev, dev);

	if (vlan->port->passthru) {
		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
			dev_set_promiscuity(lowerdev, -1);
		goto hash_del;
	}

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(lowerdev, -1);

	dev_uc_del(lowerdev, dev->dev_addr);

hash_del:
	macvlan_hash_del(vlan, !dev->dismantle);
	return 0;
}

static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	int err;

	if (!(dev->flags & IFF_UP)) {
		/* Just copy in the new address */
		ether_addr_copy(dev->dev_addr, addr);
	} else {
		/* Rehash and update the device filters */
		if (macvlan_addr_busy(vlan->port, addr))
			return -EBUSY;

		if (!vlan->port->passthru) {
			err = dev_uc_add(lowerdev, addr);
			if (err)
				return err;

			dev_uc_del(lowerdev, dev->dev_addr);
		}

		macvlan_hash_change_addr(vlan, addr);
	}
	return 0;
}

static int macvlan_set_mac_address(struct net_device *dev, void *p)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
		dev_set_mac_address(vlan->lowerdev, addr);
		return 0;
	}

	return macvlan_sync_address(dev, addr->sa_data);
}

static void macvlan_change_rx_flags(struct net_device *dev, int change)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
	}
}
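
/* Rebuild the multicast filter bitmap from the device's multicast list
 * (or open it up completely in promiscuous/allmulti mode) and sync the
 * unicast and multicast address lists down to the lower device.
 */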
static void macvlan_set_mac_lists(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
	} else {
		struct netdev_hw_addr *ha;
		DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);

		bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
		netdev_for_each_mc_addr(ha, dev) {
			__set_bit(mc_hash(vlan, ha->addr), filter);
		}

		__set_bit(mc_hash(vlan, dev->broadcast), filter);

		bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
	}
	dev_uc_sync(vlan->lowerdev, dev);
	dev_mc_sync(vlan->lowerdev, dev);
}

static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
/*
 * macvlan network devices have devices nesting below them and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;

#define ALWAYS_ON_FEATURES \
	(NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)

#define MACVLAN_FEATURES \
	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
	 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

#define MACVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
static int macvlan_get_nest_level(struct net_device *dev)
{
	return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
}

static void macvlan_set_lockdep_class_one(struct net_device *dev,
					  struct netdev_queue *txq,
					  void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &macvlan_netdev_xmit_lock_key);
}

static void macvlan_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macvlan_netdev_addr_lock_key,
				       macvlan_get_nest_level(dev));
	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}

static int macvlan_init(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	const struct net_device *lowerdev = vlan->lowerdev;

	dev->state		= (dev->state & ~MACVLAN_STATE_MASK) |
				  (lowerdev->state & MACVLAN_STATE_MASK);
	dev->features		= lowerdev->features & MACVLAN_FEATURES;
	dev->features		|= ALWAYS_ON_FEATURES;
	dev->vlan_features	= lowerdev->vlan_features & MACVLAN_FEATURES;
	dev->gso_max_size	= lowerdev->gso_max_size;
	dev->iflink		= lowerdev->ifindex;
	dev->hard_header_len	= lowerdev->hard_header_len;

	macvlan_set_lockdep_class(dev);

	vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
	if (!vlan->pcpu_stats)
		return -ENOMEM;

	return 0;
}
static void macvlan_uninit(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port = vlan->port;

	free_percpu(vlan->pcpu_stats);

	port->count -= 1;
	if (!port->count)
		macvlan_port_destroy(port->dev);
}

static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
							  struct rtnl_link_stats64 *stats)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->pcpu_stats) {
		struct vlan_pcpu_stats *p;
		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
		u32 rx_errors = 0, tx_dropped = 0;
		unsigned int start;
		int i;

		for_each_possible_cpu(i) {
			p = per_cpu_ptr(vlan->pcpu_stats, i);
			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				rx_packets	= p->rx_packets;
				rx_bytes	= p->rx_bytes;
				rx_multicast	= p->rx_multicast;
				tx_packets	= p->tx_packets;
				tx_bytes	= p->tx_bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));

			stats->rx_packets	+= rx_packets;
			stats->rx_bytes		+= rx_bytes;
			stats->multicast	+= rx_multicast;
			stats->tx_packets	+= tx_packets;
			stats->tx_bytes		+= tx_bytes;
			/* rx_errors & tx_dropped are u32, updated
			 * without syncp protection.
			 */
			rx_errors	+= p->rx_errors;
			tx_dropped	+= p->tx_dropped;
		}
		stats->rx_errors	= rx_errors;
		stats->rx_dropped	= rx_errors;
		stats->tx_dropped	= tx_dropped;
	}
	return stats;
}
static int macvlan_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	return vlan_vid_add(lowerdev, proto, vid);
}

static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	vlan_vid_del(lowerdev, proto, vid);
	return 0;
}
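
/* The fdb netlink operations are only supported in passthru mode;
 * other modes return -EOPNOTSUPP.
 */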
static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr,
			   u16 flags)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	if (!vlan->port->passthru)
		return -EOPNOTSUPP;

	if (flags & NLM_F_REPLACE)
		return -EOPNOTSUPP;

	if (is_unicast_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	return err;
}

static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	if (!vlan->port->passthru)
		return -EOPNOTSUPP;

	if (is_unicast_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}

static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}

static int macvlan_ethtool_get_settings(struct net_device *dev,
					struct ethtool_cmd *cmd)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);

	return __ethtool_get_settings(vlan->lowerdev, cmd);
}
static netdev_features_t macvlan_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	netdev_features_t mask;

	features |= NETIF_F_ALL_FOR_ALL;
	features &= (vlan->set_features | ~MACVLAN_FEATURES);
	mask = features;

	features = netdev_increment_features(vlan->lowerdev->features,
					     features,
					     mask);
	features |= ALWAYS_ON_FEATURES;
	features &= ~NETIF_F_NETNS_LOCAL;

	return features;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void macvlan_dev_poll_controller(struct net_device *dev)
{
	return;
}

static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *real_dev = vlan->lowerdev;
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	err = -ENOMEM;
	if (!netpoll)
		goto out;

	err = __netpoll_setup(netpoll, real_dev);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	vlan->netpoll = netpoll;

out:
	return err;
}

static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct netpoll *netpoll = vlan->netpoll;

	if (!netpoll)
		return;

	vlan->netpoll = NULL;

	__netpoll_free_async(netpoll);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static const struct ethtool_ops macvlan_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_settings		= macvlan_ethtool_get_settings,
	.get_drvinfo		= macvlan_ethtool_get_drvinfo,
};

static const struct net_device_ops macvlan_netdev_ops = {
	.ndo_init		= macvlan_init,
	.ndo_uninit		= macvlan_uninit,
	.ndo_open		= macvlan_open,
	.ndo_stop		= macvlan_stop,
	.ndo_start_xmit		= macvlan_start_xmit,
	.ndo_change_mtu		= macvlan_change_mtu,
	.ndo_fix_features	= macvlan_fix_features,
	.ndo_change_rx_flags	= macvlan_change_rx_flags,
	.ndo_set_mac_address	= macvlan_set_mac_address,
	.ndo_set_rx_mode	= macvlan_set_mac_lists,
	.ndo_get_stats64	= macvlan_dev_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
	.ndo_fdb_add		= macvlan_fdb_add,
	.ndo_fdb_del		= macvlan_fdb_del,
	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
	.ndo_get_lock_subclass	= macvlan_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macvlan_dev_poll_controller,
	.ndo_netpoll_setup	= macvlan_dev_netpoll_setup,
	.ndo_netpoll_cleanup	= macvlan_dev_netpoll_cleanup,
#endif
};
void macvlan_common_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags	       &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags	       |= IFF_UNICAST_FLT;
	dev->netdev_ops		= &macvlan_netdev_ops;
	dev->destructor		= free_netdev;
	dev->header_ops		= &macvlan_hard_header_ops;
	dev->ethtool_ops	= &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);

static void macvlan_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len	= 0;
}
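
/* Attach macvlan state to a lower device: allocate the shared
 * macvlan_port, initialize the broadcast queue and register the rx
 * handler that steers incoming frames to the macvlans on the port.
 */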
static int macvlan_port_create(struct net_device *dev)
{
	struct macvlan_port *port;
	unsigned int i;
	int err;

	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
		return -EINVAL;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	port->passthru = false;
	port->dev = dev;
	INIT_LIST_HEAD(&port->vlans);
	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&port->vlan_hash[i]);

	skb_queue_head_init(&port->bc_queue);
	INIT_WORK(&port->bc_work, macvlan_process_broadcast);

	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
	if (err)
		kfree(port);
	else
		dev->priv_flags |= IFF_MACVLAN_PORT;
	return err;
}

static void macvlan_port_destroy(struct net_device *dev)
{
	struct macvlan_port *port = macvlan_port_get_rtnl(dev);

	dev->priv_flags &= ~IFF_MACVLAN_PORT;
	netdev_rx_handler_unregister(dev);

	/* After this point, no packet can schedule bc_work anymore,
	 * but we need to cancel it and purge left skbs if any.
	 */
	cancel_work_sync(&port->bc_work);
	__skb_queue_purge(&port->bc_queue);

	kfree_rcu(port, rcu);
}
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (data && data[IFLA_MACVLAN_FLAGS] &&
	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
		return -EINVAL;

	if (data && data[IFLA_MACVLAN_MODE]) {
		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
		case MACVLAN_MODE_PRIVATE:
		case MACVLAN_MODE_VEPA:
		case MACVLAN_MODE_BRIDGE:
		case MACVLAN_MODE_PASSTHRU:
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
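
/* Core newlink implementation, exported so that macvtap can reuse it
 * when creating its devices on top of a lower device.
 */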
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port;
	struct net_device *lowerdev;
	int err;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (lowerdev == NULL)
		return -ENODEV;

	/* When creating macvlans or macvtaps on top of other macvlans - use
	 * the real device as the lowerdev.
	 */
	if (netif_is_macvlan(lowerdev))
		lowerdev = macvlan_dev_real_dev(lowerdev);

	if (!tb[IFLA_MTU])
		dev->mtu = lowerdev->mtu;
	else if (dev->mtu > lowerdev->mtu)
		return -EINVAL;

	if (!tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	if (!macvlan_port_exists(lowerdev)) {
		err = macvlan_port_create(lowerdev);
		if (err < 0)
			return err;
	}
	port = macvlan_port_get_rtnl(lowerdev);

	/* Only 1 macvlan device can be created in passthru mode */
	if (port->passthru)
		return -EINVAL;

	vlan->lowerdev = lowerdev;
	vlan->dev      = dev;
	vlan->port     = port;
	vlan->set_features = MACVLAN_FEATURES;
	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;

	vlan->mode     = MACVLAN_MODE_VEPA;
	if (data && data[IFLA_MACVLAN_MODE])
		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);

	if (data && data[IFLA_MACVLAN_FLAGS])
		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);

	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
		if (port->count)
			return -EINVAL;
		port->passthru = true;
		eth_hw_addr_inherit(dev, lowerdev);
	}

	port->count += 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto destroy_port;

	dev->priv_flags |= IFF_MACVLAN;
	err = netdev_upper_dev_link(lowerdev, dev);
	if (err)
		goto unregister_netdev;

	list_add_tail_rcu(&vlan->list, &port->vlans);
	netif_stacked_transfer_operstate(lowerdev, dev);

	return 0;

unregister_netdev:
	unregister_netdevice(dev);
destroy_port:
	port->count -= 1;
	if (!port->count)
		macvlan_port_destroy(lowerdev);

	return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);

static int macvlan_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	return macvlan_common_newlink(src_net, dev, tb, data);
}

void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	list_del_rcu(&vlan->list);
	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);
static int macvlan_changelink(struct net_device *dev,
			      struct nlattr *tb[], struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	enum macvlan_mode mode;
	bool set_mode = false;

	/* Validate mode, but don't set yet: setting flags may fail. */
	if (data && data[IFLA_MACVLAN_MODE]) {
		set_mode = true;
		mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
		/* Passthrough mode can't be set or cleared dynamically */
		if ((mode == MACVLAN_MODE_PASSTHRU) !=
		    (vlan->mode == MACVLAN_MODE_PASSTHRU))
			return -EINVAL;
	}

	if (data && data[IFLA_MACVLAN_FLAGS]) {
		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
		if (vlan->port->passthru && promisc) {
			int err;

			if (flags & MACVLAN_FLAG_NOPROMISC)
				err = dev_set_promiscuity(vlan->lowerdev, -1);
			else
				err = dev_set_promiscuity(vlan->lowerdev, 1);
			if (err < 0)
				return err;
		}
		vlan->flags = flags;
	}
	if (set_mode)
		vlan->mode = mode;
	return 0;
}

static size_t macvlan_get_size(const struct net_device *dev)
{
	return (0
		+ nla_total_size(4) /* IFLA_MACVLAN_MODE */
		+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
		);
}

static int macvlan_fill_info(struct sk_buff *skb,
			     const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
	[IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
	[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
};
int macvlan_link_register(struct rtnl_link_ops *ops)
{
	/* common fields */
	ops->priv_size		= sizeof(struct macvlan_dev);
	ops->validate		= macvlan_validate;
	ops->maxtype		= IFLA_MACVLAN_MAX;
	ops->policy		= macvlan_policy;
	ops->changelink		= macvlan_changelink;
	ops->get_size		= macvlan_get_size;
	ops->fill_info		= macvlan_fill_info;

	return rtnl_link_register(ops);
}
EXPORT_SYMBOL_GPL(macvlan_link_register);

static struct rtnl_link_ops macvlan_link_ops = {
	.kind		= "macvlan",
	.setup		= macvlan_setup,
	.newlink	= macvlan_newlink,
	.dellink	= macvlan_dellink,
};
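
/* Lower device notifier: propagate operstate, feature, MTU and address
 * changes from the lower device to the macvlans stacked on it, and tear
 * the macvlans down when the lower device is unregistered.
 */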
static int macvlan_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan, *next;
	struct macvlan_port *port;
	LIST_HEAD(list_kill);

	if (!macvlan_port_exists(dev))
		return NOTIFY_DONE;

	port = macvlan_port_get_rtnl(dev);

	switch (event) {
	case NETDEV_CHANGE:
		list_for_each_entry(vlan, &port->vlans, list)
			netif_stacked_transfer_operstate(vlan->lowerdev,
							 vlan->dev);
		break;
	case NETDEV_FEAT_CHANGE:
		list_for_each_entry(vlan, &port->vlans, list) {
			vlan->dev->gso_max_size = dev->gso_max_size;
			netdev_update_features(vlan->dev);
		}
		break;
	case NETDEV_CHANGEMTU:
		list_for_each_entry(vlan, &port->vlans, list) {
			if (vlan->dev->mtu <= dev->mtu)
				continue;
			dev_set_mtu(vlan->dev, dev->mtu);
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!port->passthru)
			return NOTIFY_DONE;

		vlan = list_first_entry_or_null(&port->vlans,
						struct macvlan_dev,
						list);

		if (macvlan_sync_address(vlan->dev, dev->dev_addr))
			return NOTIFY_BAD;

		break;
	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		list_for_each_entry_safe(vlan, next, &port->vlans, list)
			vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid the underlying device from changing its type. */
		return NOTIFY_BAD;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to all vlans */
		list_for_each_entry(vlan, &port->vlans, list)
			call_netdevice_notifiers(event, vlan->dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block macvlan_notifier_block __read_mostly = {
	.notifier_call	= macvlan_device_event,
};

static int __init macvlan_init_module(void)
{
	int err;

	register_netdevice_notifier(&macvlan_notifier_block);

	err = macvlan_link_register(&macvlan_link_ops);
	if (err < 0)
		goto err1;
	return 0;
err1:
	unregister_netdevice_notifier(&macvlan_notifier_block);
	return err;
}

static void __exit macvlan_cleanup_module(void)
{
	rtnl_link_unregister(&macvlan_link_ops);
	unregister_netdevice_notifier(&macvlan_notifier_block);
}

module_init(macvlan_init_module);
module_exit(macvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Driver for MAC address based VLANs");
MODULE_ALIAS_RTNL_LINK("macvlan");