[NET]: Make the device list and device lookups per namespace.
[linux-2.6/verdex.git] net/core/dev.c
blob 3a3d5ee739098b66f4a84ca0d708aff275a3b64a
1 /*
2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/sched.h>
83 #include <linux/mutex.h>
84 #include <linux/string.h>
85 #include <linux/mm.h>
86 #include <linux/socket.h>
87 #include <linux/sockios.h>
88 #include <linux/errno.h>
89 #include <linux/interrupt.h>
90 #include <linux/if_ether.h>
91 #include <linux/netdevice.h>
92 #include <linux/etherdevice.h>
93 #include <linux/notifier.h>
94 #include <linux/skbuff.h>
95 #include <net/net_namespace.h>
96 #include <net/sock.h>
97 #include <linux/rtnetlink.h>
98 #include <linux/proc_fs.h>
99 #include <linux/seq_file.h>
100 #include <linux/stat.h>
101 #include <linux/if_bridge.h>
102 #include <linux/if_macvlan.h>
103 #include <net/dst.h>
104 #include <net/pkt_sched.h>
105 #include <net/checksum.h>
106 #include <linux/highmem.h>
107 #include <linux/init.h>
108 #include <linux/kmod.h>
109 #include <linux/module.h>
110 #include <linux/kallsyms.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
124 * The list of packet types we will receive (as opposed to discard)
125 * and the routines to invoke.
127 * Why 16. Because with 16 the only overlap we get on a hash of the
128 * low nibble of the protocol value is RARP/SNAP/X.25.
130 * NOTE: That is no longer true with the addition of VLAN tags. Not
131 * sure which should go first, but I bet it won't make much
132 * difference if we are running VLANs. The good news is that
133 * this protocol won't be in the list unless compiled in, so
134 * the average user (w/out VLANs) will not be adversely affected.
135 * --BLG
137 * 0800 IP
138 * 8100 802.1Q VLAN
139 * 0001 802.3
140 * 0002 AX.25
141 * 0004 802.2
142 * 8035 RARP
143 * 0005 SNAP
144 * 0805 X.25
145 * 0806 ARP
146 * 8137 IPX
147 * 0009 Localtalk
148 * 86DD IPv6
151 static DEFINE_SPINLOCK(ptype_lock);
152 static struct list_head ptype_base[16] __read_mostly; /* 16 way hashed list */
153 static struct list_head ptype_all __read_mostly; /* Taps */
155 #ifdef CONFIG_NET_DMA
156 struct net_dma {
157 struct dma_client client;
158 spinlock_t lock;
159 cpumask_t channel_mask;
160 struct dma_chan *channels[NR_CPUS];
163 static enum dma_state_client
164 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
165 enum dma_state state);
167 static struct net_dma net_dma = {
168 .client = {
169 .event_callback = netdev_dma_event,
172 #endif
175 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
176 * semaphore.
178 * Pure readers hold dev_base_lock for reading.
180 * Writers must hold the rtnl semaphore while they loop through the
181 * dev_base_head list, and hold dev_base_lock for writing when they do the
182 * actual updates. This allows pure readers to access the list even
183 * while a writer is preparing to update it.
185 * To put it another way, dev_base_lock is held for writing only to
186 * protect against pure readers; the rtnl semaphore provides the
187 * protection against other writers.
189 * See, for example usages, register_netdevice() and
190 * unregister_netdevice(), which must be called with the rtnl
191 * semaphore held.
193 DEFINE_RWLOCK(dev_base_lock);
195 EXPORT_SYMBOL(dev_base_lock);
197 #define NETDEV_HASHBITS 8
198 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
200 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
202 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
203 return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
206 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
208 return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
212 * Our notifier list
215 static RAW_NOTIFIER_HEAD(netdev_chain);
218 * Device drivers call our routines to queue packets here. We empty the
219 * queue in the local softnet handler.
222 DEFINE_PER_CPU(struct softnet_data, softnet_data);
224 #ifdef CONFIG_SYSFS
225 extern int netdev_sysfs_init(void);
226 extern int netdev_register_sysfs(struct net_device *);
227 extern void netdev_unregister_sysfs(struct net_device *);
228 #else
229 #define netdev_sysfs_init() (0)
230 #define netdev_register_sysfs(dev) (0)
231 #define netdev_unregister_sysfs(dev) do { } while(0)
232 #endif
234 #ifdef CONFIG_DEBUG_LOCK_ALLOC
236 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
237 * according to dev->type
239 static const unsigned short netdev_lock_type[] =
240 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
241 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
242 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
243 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
244 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
245 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
246 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
247 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
248 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
249 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
250 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
251 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
252 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
253 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
254 ARPHRD_NONE};
256 static const char *netdev_lock_name[] =
257 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
258 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
259 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
260 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
261 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
262 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
263 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
264 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
265 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
266 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
267 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
268 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
269 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
270 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
271 "_xmit_NONE"};
273 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
275 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
277 int i;
279 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
280 if (netdev_lock_type[i] == dev_type)
281 return i;
282 /* the last key is used by default */
283 return ARRAY_SIZE(netdev_lock_type) - 1;
286 static inline void netdev_set_lockdep_class(spinlock_t *lock,
287 unsigned short dev_type)
289 int i;
291 i = netdev_lock_pos(dev_type);
292 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
293 netdev_lock_name[i]);
295 #else
296 static inline void netdev_set_lockdep_class(spinlock_t *lock,
297 unsigned short dev_type)
300 #endif
302 /*******************************************************************************
304 Protocol management and registration routines
306 *******************************************************************************/
309 * Add a protocol ID to the list. Now that the input handler is
310 * smarter we can dispense with all the messy stuff that used to be
311 * here.
313 * BEWARE!!! Protocol handlers, mangling input packets,
314 * MUST BE last in hash buckets and checking protocol handlers
315 * MUST start from promiscuous ptype_all chain in net_bh.
316 * It is true now, do not change it.
 317 * Explanation follows: if a protocol handler that mangles the packet
 318 * were first on the list, it would not be able to sense that the
 319 * packet is cloned and should be copied-on-write, so it would
 320 * change it and subsequent readers would get a broken packet.
321 * --ANK (980803)
325 * dev_add_pack - add packet handler
326 * @pt: packet type declaration
328 * Add a protocol handler to the networking stack. The passed &packet_type
329 * is linked into kernel lists and may not be freed until it has been
330 * removed from the kernel lists.
 332 * This call does not sleep, therefore it can not
 333 * guarantee that all CPUs that are in the middle of receiving packets
 334 * will see the new packet type (until the next received packet).
337 void dev_add_pack(struct packet_type *pt)
339 int hash;
341 spin_lock_bh(&ptype_lock);
342 if (pt->type == htons(ETH_P_ALL))
343 list_add_rcu(&pt->list, &ptype_all);
344 else {
345 hash = ntohs(pt->type) & 15;
346 list_add_rcu(&pt->list, &ptype_base[hash]);
348 spin_unlock_bh(&ptype_lock);
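/*
 * Example (hypothetical usage sketch, not part of the original source): a
 * protocol module would typically keep its struct packet_type in static
 * storage, register it once at init time and remove it at exit time.  The
 * handler name my_rcv and the ETH_P_IP type are illustrative assumptions.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype = {
 *		.type	= __constant_htons(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	at module init
 *	dev_remove_pack(&my_ptype);	at module exit (may sleep)
 */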
352 * __dev_remove_pack - remove packet handler
353 * @pt: packet type declaration
355 * Remove a protocol handler that was previously added to the kernel
356 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
357 * from the kernel lists and can be freed or reused once this function
358 * returns.
360 * The packet type might still be in use by receivers
361 * and must not be freed until after all the CPU's have gone
362 * through a quiescent state.
364 void __dev_remove_pack(struct packet_type *pt)
366 struct list_head *head;
367 struct packet_type *pt1;
369 spin_lock_bh(&ptype_lock);
371 if (pt->type == htons(ETH_P_ALL))
372 head = &ptype_all;
373 else
374 head = &ptype_base[ntohs(pt->type) & 15];
376 list_for_each_entry(pt1, head, list) {
377 if (pt == pt1) {
378 list_del_rcu(&pt->list);
379 goto out;
383 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
384 out:
385 spin_unlock_bh(&ptype_lock);
388 * dev_remove_pack - remove packet handler
389 * @pt: packet type declaration
391 * Remove a protocol handler that was previously added to the kernel
392 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
393 * from the kernel lists and can be freed or reused once this function
394 * returns.
396 * This call sleeps to guarantee that no CPU is looking at the packet
397 * type after return.
399 void dev_remove_pack(struct packet_type *pt)
401 __dev_remove_pack(pt);
403 synchronize_net();
406 /******************************************************************************
408 Device Boot-time Settings Routines
410 *******************************************************************************/
412 /* Boot time configuration table */
413 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
416 * netdev_boot_setup_add - add new setup entry
417 * @name: name of the device
418 * @map: configured settings for the device
420 * Adds new setup entry to the dev_boot_setup list. The function
 421 * returns 0 on error and 1 on success.  This is a generic routine for
 422 * all netdevices.
424 static int netdev_boot_setup_add(char *name, struct ifmap *map)
426 struct netdev_boot_setup *s;
427 int i;
429 s = dev_boot_setup;
430 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
431 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
432 memset(s[i].name, 0, sizeof(s[i].name));
433 strcpy(s[i].name, name);
434 memcpy(&s[i].map, map, sizeof(s[i].map));
435 break;
439 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
443 * netdev_boot_setup_check - check boot time settings
444 * @dev: the netdevice
446 * Check boot time settings for the device.
447 * The found settings are set for the device to be used
448 * later in the device probing.
449 * Returns 0 if no settings found, 1 if they are.
451 int netdev_boot_setup_check(struct net_device *dev)
453 struct netdev_boot_setup *s = dev_boot_setup;
454 int i;
456 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
457 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
458 !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
459 dev->irq = s[i].map.irq;
460 dev->base_addr = s[i].map.base_addr;
461 dev->mem_start = s[i].map.mem_start;
462 dev->mem_end = s[i].map.mem_end;
463 return 1;
466 return 0;
471 * netdev_boot_base - get address from boot time settings
472 * @prefix: prefix for network device
473 * @unit: id for network device
475 * Check boot time settings for the base address of device.
476 * The found settings are set for the device to be used
477 * later in the device probing.
478 * Returns 0 if no settings found.
480 unsigned long netdev_boot_base(const char *prefix, int unit)
482 const struct netdev_boot_setup *s = dev_boot_setup;
483 char name[IFNAMSIZ];
484 int i;
486 sprintf(name, "%s%d", prefix, unit);
489 * If device already registered then return base of 1
490 * to indicate not to probe for this interface
492 if (__dev_get_by_name(&init_net, name))
493 return 1;
495 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
496 if (!strcmp(name, s[i].name))
497 return s[i].map.base_addr;
498 return 0;
502 * Saves at boot time configured settings for any netdevice.
504 int __init netdev_boot_setup(char *str)
506 int ints[5];
507 struct ifmap map;
509 str = get_options(str, ARRAY_SIZE(ints), ints);
510 if (!str || !*str)
511 return 0;
513 /* Save settings */
514 memset(&map, 0, sizeof(map));
515 if (ints[0] > 0)
516 map.irq = ints[1];
517 if (ints[0] > 1)
518 map.base_addr = ints[2];
519 if (ints[0] > 2)
520 map.mem_start = ints[3];
521 if (ints[0] > 3)
522 map.mem_end = ints[4];
524 /* Add new entry to the list */
525 return netdev_boot_setup_add(str, &map);
528 __setup("netdev=", netdev_boot_setup);
530 /*******************************************************************************
532 Device Interface Subroutines
534 *******************************************************************************/
537 * __dev_get_by_name - find a device by its name
538 * @name: name to find
540 * Find an interface by name. Must be called under RTNL semaphore
541 * or @dev_base_lock. If the name is found a pointer to the device
542 * is returned. If the name is not found then %NULL is returned. The
543 * reference counters are not incremented so the caller must be
544 * careful with locks.
547 struct net_device *__dev_get_by_name(struct net *net, const char *name)
549 struct hlist_node *p;
551 hlist_for_each(p, dev_name_hash(net, name)) {
552 struct net_device *dev
553 = hlist_entry(p, struct net_device, name_hlist);
554 if (!strncmp(dev->name, name, IFNAMSIZ))
555 return dev;
557 return NULL;
561 * dev_get_by_name - find a device by its name
562 * @name: name to find
564 * Find an interface by name. This can be called from any
565 * context and does its own locking. The returned handle has
566 * the usage count incremented and the caller must use dev_put() to
567 * release it when it is no longer needed. %NULL is returned if no
568 * matching device is found.
571 struct net_device *dev_get_by_name(struct net *net, const char *name)
573 struct net_device *dev;
575 read_lock(&dev_base_lock);
576 dev = __dev_get_by_name(net, name);
577 if (dev)
578 dev_hold(dev);
579 read_unlock(&dev_base_lock);
580 return dev;
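/*
 * Example (hypothetical usage sketch): a caller holding no locks uses the
 * reference-taking variant and drops the reference when done.  The name
 * "eth0" and the use of &init_net are assumptions for illustration.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		printk(KERN_INFO "%s has ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */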
584 * __dev_get_by_index - find a device by its ifindex
585 * @ifindex: index of device
587 * Search for an interface by index. Returns %NULL if the device
588 * is not found or a pointer to the device. The device has not
589 * had its reference counter increased so the caller must be careful
590 * about locking. The caller must hold either the RTNL semaphore
591 * or @dev_base_lock.
594 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
596 struct hlist_node *p;
598 hlist_for_each(p, dev_index_hash(net, ifindex)) {
599 struct net_device *dev
600 = hlist_entry(p, struct net_device, index_hlist);
601 if (dev->ifindex == ifindex)
602 return dev;
604 return NULL;
609 * dev_get_by_index - find a device by its ifindex
610 * @ifindex: index of device
612 * Search for an interface by index. Returns NULL if the device
613 * is not found or a pointer to the device. The device returned has
614 * had a reference added and the pointer is safe until the user calls
615 * dev_put to indicate they have finished with it.
618 struct net_device *dev_get_by_index(struct net *net, int ifindex)
620 struct net_device *dev;
622 read_lock(&dev_base_lock);
623 dev = __dev_get_by_index(net, ifindex);
624 if (dev)
625 dev_hold(dev);
626 read_unlock(&dev_base_lock);
627 return dev;
631 * dev_getbyhwaddr - find a device by its hardware address
632 * @type: media type of device
633 * @ha: hardware address
635 * Search for an interface by MAC address. Returns NULL if the device
636 * is not found or a pointer to the device. The caller must hold the
637 * rtnl semaphore. The returned device has not had its ref count increased
638 * and the caller must therefore be careful about locking
640 * BUGS:
641 * If the API was consistent this would be __dev_get_by_hwaddr
644 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
646 struct net_device *dev;
648 ASSERT_RTNL();
650 for_each_netdev(&init_net, dev)
651 if (dev->type == type &&
652 !memcmp(dev->dev_addr, ha, dev->addr_len))
653 return dev;
655 return NULL;
658 EXPORT_SYMBOL(dev_getbyhwaddr);
660 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
662 struct net_device *dev;
664 ASSERT_RTNL();
665 for_each_netdev(net, dev)
666 if (dev->type == type)
667 return dev;
669 return NULL;
672 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
674 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
676 struct net_device *dev;
678 rtnl_lock();
679 dev = __dev_getfirstbyhwtype(net, type);
680 if (dev)
681 dev_hold(dev);
682 rtnl_unlock();
683 return dev;
686 EXPORT_SYMBOL(dev_getfirstbyhwtype);
689 * dev_get_by_flags - find any device with given flags
690 * @if_flags: IFF_* values
691 * @mask: bitmask of bits in if_flags to check
693 * Search for any interface with the given flags. Returns NULL if a device
694 * is not found or a pointer to the device. The device returned has
695 * had a reference added and the pointer is safe until the user calls
696 * dev_put to indicate they have finished with it.
699 struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
701 struct net_device *dev, *ret;
703 ret = NULL;
704 read_lock(&dev_base_lock);
705 for_each_netdev(net, dev) {
706 if (((dev->flags ^ if_flags) & mask) == 0) {
707 dev_hold(dev);
708 ret = dev;
709 break;
712 read_unlock(&dev_base_lock);
713 return ret;
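/*
 * Example (hypothetical usage sketch): find any interface that is currently
 * up, ignoring every other flag bit; purely illustrative.
 *
 *	struct net_device *dev = dev_get_by_flags(net, IFF_UP, IFF_UP);
 *
 *	if (dev) {
 *		printk(KERN_INFO "%s is up\n", dev->name);
 *		dev_put(dev);
 *	}
 */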
717 * dev_valid_name - check if name is okay for network device
718 * @name: name string
 720 * Network device names need to be valid file names to
 721 * allow sysfs to work.  We also disallow any kind of
722 * whitespace.
724 int dev_valid_name(const char *name)
726 if (*name == '\0')
727 return 0;
728 if (strlen(name) >= IFNAMSIZ)
729 return 0;
730 if (!strcmp(name, ".") || !strcmp(name, ".."))
731 return 0;
733 while (*name) {
734 if (*name == '/' || isspace(*name))
735 return 0;
736 name++;
738 return 1;
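/*
 * For illustration: dev_valid_name("eth0") and dev_valid_name("tap%d")
 * return 1, while dev_valid_name(""), dev_valid_name(".."),
 * dev_valid_name("a/b") and any name of IFNAMSIZ or more characters
 * return 0.
 */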
742 * dev_alloc_name - allocate a name for a device
743 * @dev: device
744 * @name: name format string
746 * Passed a format string - eg "lt%d" it will try and find a suitable
747 * id. It scans list of devices to build up a free map, then chooses
748 * the first empty slot. The caller must hold the dev_base or rtnl lock
749 * while allocating the name and adding the device in order to avoid
750 * duplicates.
751 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
752 * Returns the number of the unit assigned or a negative errno code.
755 int dev_alloc_name(struct net_device *dev, const char *name)
757 int i = 0;
758 char buf[IFNAMSIZ];
759 const char *p;
760 const int max_netdevices = 8*PAGE_SIZE;
761 long *inuse;
762 struct net_device *d;
763 struct net *net;
765 BUG_ON(!dev->nd_net);
766 net = dev->nd_net;
768 p = strnchr(name, IFNAMSIZ-1, '%');
769 if (p) {
771 * Verify the string as this thing may have come from
772 * the user. There must be either one "%d" and no other "%"
773 * characters.
775 if (p[1] != 'd' || strchr(p + 2, '%'))
776 return -EINVAL;
778 /* Use one page as a bit array of possible slots */
779 inuse = (long *) get_zeroed_page(GFP_ATOMIC);
780 if (!inuse)
781 return -ENOMEM;
783 for_each_netdev(net, d) {
784 if (!sscanf(d->name, name, &i))
785 continue;
786 if (i < 0 || i >= max_netdevices)
787 continue;
789 /* avoid cases where sscanf is not exact inverse of printf */
790 snprintf(buf, sizeof(buf), name, i);
791 if (!strncmp(buf, d->name, IFNAMSIZ))
792 set_bit(i, inuse);
795 i = find_first_zero_bit(inuse, max_netdevices);
796 free_page((unsigned long) inuse);
799 snprintf(buf, sizeof(buf), name, i);
800 if (!__dev_get_by_name(net, buf)) {
801 strlcpy(dev->name, buf, IFNAMSIZ);
802 return i;
805 /* It is possible to run out of possible slots
806 * when the name is long and there isn't enough space left
807 * for the digits, or if all bits are used.
809 return -ENFILE;
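/*
 * Example (hypothetical usage sketch): a driver that wants the next free
 * unit of a template name; "eth%d" is an illustrative format string.
 *
 *	int unit = dev_alloc_name(dev, "eth%d");
 *
 *	if (unit < 0)
 *		return unit;
 *	dev->name now holds "eth0", "eth1", ... depending on what is in use.
 */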
814 * dev_change_name - change name of a device
815 * @dev: device
816 * @newname: name (or format string) must be at least IFNAMSIZ
 818 * Change name of a device, can pass format strings "eth%d"
 819 * for wildcarding.
821 int dev_change_name(struct net_device *dev, char *newname)
823 char oldname[IFNAMSIZ];
824 int err = 0;
825 int ret;
826 struct net *net;
828 ASSERT_RTNL();
829 BUG_ON(!dev->nd_net);
831 net = dev->nd_net;
832 if (dev->flags & IFF_UP)
833 return -EBUSY;
835 if (!dev_valid_name(newname))
836 return -EINVAL;
838 memcpy(oldname, dev->name, IFNAMSIZ);
840 if (strchr(newname, '%')) {
841 err = dev_alloc_name(dev, newname);
842 if (err < 0)
843 return err;
844 strcpy(newname, dev->name);
846 else if (__dev_get_by_name(net, newname))
847 return -EEXIST;
848 else
849 strlcpy(dev->name, newname, IFNAMSIZ);
851 rollback:
852 device_rename(&dev->dev, dev->name);
854 write_lock_bh(&dev_base_lock);
855 hlist_del(&dev->name_hlist);
856 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
857 write_unlock_bh(&dev_base_lock);
859 ret = raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
860 ret = notifier_to_errno(ret);
862 if (ret) {
863 if (err) {
864 printk(KERN_ERR
865 "%s: name change rollback failed: %d.\n",
866 dev->name, ret);
867 } else {
868 err = ret;
869 memcpy(dev->name, oldname, IFNAMSIZ);
870 goto rollback;
874 return err;
878 * netdev_features_change - device changes features
879 * @dev: device to cause notification
881 * Called to indicate a device has changed features.
883 void netdev_features_change(struct net_device *dev)
885 raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
887 EXPORT_SYMBOL(netdev_features_change);
890 * netdev_state_change - device changes state
891 * @dev: device to cause notification
893 * Called to indicate a device has changed state. This function calls
894 * the notifier chains for netdev_chain and sends a NEWLINK message
895 * to the routing socket.
897 void netdev_state_change(struct net_device *dev)
899 if (dev->flags & IFF_UP) {
900 raw_notifier_call_chain(&netdev_chain,
901 NETDEV_CHANGE, dev);
902 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
907 * dev_load - load a network module
908 * @name: name of interface
910 * If a network interface is not present and the process has suitable
911 * privileges this function loads the module. If module loading is not
912 * available in this kernel then it becomes a nop.
915 void dev_load(struct net *net, const char *name)
917 struct net_device *dev;
919 read_lock(&dev_base_lock);
920 dev = __dev_get_by_name(net, name);
921 read_unlock(&dev_base_lock);
923 if (!dev && capable(CAP_SYS_MODULE))
924 request_module("%s", name);
927 static int default_rebuild_header(struct sk_buff *skb)
929 printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
930 skb->dev ? skb->dev->name : "NULL!!!");
931 kfree_skb(skb);
932 return 1;
936 * dev_open - prepare an interface for use.
937 * @dev: device to open
939 * Takes a device from down to up state. The device's private open
940 * function is invoked and then the multicast lists are loaded. Finally
941 * the device is moved into the up state and a %NETDEV_UP message is
942 * sent to the netdev notifier chain.
944 * Calling this function on an active interface is a nop. On a failure
945 * a negative errno code is returned.
947 int dev_open(struct net_device *dev)
949 int ret = 0;
952 * Is it already up?
955 if (dev->flags & IFF_UP)
956 return 0;
959 * Is it even present?
961 if (!netif_device_present(dev))
962 return -ENODEV;
965 * Call device private open method
967 set_bit(__LINK_STATE_START, &dev->state);
968 if (dev->open) {
969 ret = dev->open(dev);
970 if (ret)
971 clear_bit(__LINK_STATE_START, &dev->state);
975 * If it went open OK then:
978 if (!ret) {
980 * Set the flags.
982 dev->flags |= IFF_UP;
985 * Initialize multicasting status
987 dev_set_rx_mode(dev);
990 * Wakeup transmit queue engine
992 dev_activate(dev);
995 * ... and announce new interface.
997 raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
999 return ret;
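/*
 * Example (hypothetical usage sketch): bringing an interface up and down
 * again from process context.  Callers such as dev_change_flags() do this
 * with the RTNL semaphore held, which is assumed here as well.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	if (!err)
 *		err = dev_close(dev);
 *	rtnl_unlock();
 */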
1003 * dev_close - shutdown an interface.
1004 * @dev: device to shutdown
1006 * This function moves an active device into down state. A
1007 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1008 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1009 * chain.
1011 int dev_close(struct net_device *dev)
1013 if (!(dev->flags & IFF_UP))
1014 return 0;
1017 * Tell people we are going down, so that they can
 1018 * prepare for its death while the device is still operating.
1020 raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
1022 dev_deactivate(dev);
1024 clear_bit(__LINK_STATE_START, &dev->state);
1026 /* Synchronize to scheduled poll. We cannot touch poll list,
1027 * it can be even on different cpu. So just clear netif_running().
 1029 * dev->stop() will invoke napi_disable() on all of its
1030 * napi_struct instances on this device.
1032 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1035 * Call the device specific close. This cannot fail.
1036 * Only if device is UP
1038 * We allow it to be called even after a DETACH hot-plug
1039 * event.
1041 if (dev->stop)
1042 dev->stop(dev);
1045 * Device is now down.
1048 dev->flags &= ~IFF_UP;
1051 * Tell people we are down
1053 raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
1055 return 0;
1059 static int dev_boot_phase = 1;
1062 * Device change register/unregister. These are not inline or static
1063 * as we export them to the world.
1067 * register_netdevice_notifier - register a network notifier block
1068 * @nb: notifier
1070 * Register a notifier to be called when network device events occur.
1071 * The notifier passed is linked into the kernel structures and must
1072 * not be reused until it has been unregistered. A negative errno code
1073 * is returned on a failure.
 1075 * When registered, all registration and up events are replayed
 1076 * to the new notifier so that it gets a race-free
 1077 * view of the network device list.
1080 int register_netdevice_notifier(struct notifier_block *nb)
1082 struct net_device *dev;
1083 struct net_device *last;
1084 struct net *net;
1085 int err;
1087 rtnl_lock();
1088 err = raw_notifier_chain_register(&netdev_chain, nb);
1089 if (err)
1090 goto unlock;
1091 if (dev_boot_phase)
1092 goto unlock;
1093 for_each_net(net) {
1094 for_each_netdev(net, dev) {
1095 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1096 err = notifier_to_errno(err);
1097 if (err)
1098 goto rollback;
1100 if (!(dev->flags & IFF_UP))
1101 continue;
1103 nb->notifier_call(nb, NETDEV_UP, dev);
1107 unlock:
1108 rtnl_unlock();
1109 return err;
1111 rollback:
1112 last = dev;
1113 for_each_net(net) {
1114 for_each_netdev(net, dev) {
1115 if (dev == last)
1116 break;
1118 if (dev->flags & IFF_UP) {
1119 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1120 nb->notifier_call(nb, NETDEV_DOWN, dev);
1122 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1125 goto unlock;
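/*
 * Example (hypothetical usage sketch): a subsystem interested in interface
 * state changes registers a notifier block; the names my_netdev_event and
 * my_netdev_notifier are illustrative.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *			break;
 *		case NETDEV_DOWN:
 *			printk(KERN_INFO "%s is down\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call	= my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */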
1129 * unregister_netdevice_notifier - unregister a network notifier block
1130 * @nb: notifier
1132 * Unregister a notifier previously registered by
1133 * register_netdevice_notifier(). The notifier is unlinked into the
1134 * kernel structures and may then be reused. A negative errno code
1135 * is returned on a failure.
1138 int unregister_netdevice_notifier(struct notifier_block *nb)
1140 int err;
1142 rtnl_lock();
1143 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1144 rtnl_unlock();
1145 return err;
1149 * call_netdevice_notifiers - call all network notifier blocks
1150 * @val: value passed unmodified to notifier function
1151 * @v: pointer passed unmodified to notifier function
1153 * Call all network notifier blocks. Parameters and return value
1154 * are as for raw_notifier_call_chain().
1157 int call_netdevice_notifiers(unsigned long val, void *v)
1159 return raw_notifier_call_chain(&netdev_chain, val, v);
1162 /* When > 0 there are consumers of rx skb time stamps */
1163 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1165 void net_enable_timestamp(void)
1167 atomic_inc(&netstamp_needed);
1170 void net_disable_timestamp(void)
1172 atomic_dec(&netstamp_needed);
1175 static inline void net_timestamp(struct sk_buff *skb)
1177 if (atomic_read(&netstamp_needed))
1178 __net_timestamp(skb);
1179 else
1180 skb->tstamp.tv64 = 0;
1184 * Support routine. Sends outgoing frames to any network
1185 * taps currently in use.
1188 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1190 struct packet_type *ptype;
1192 net_timestamp(skb);
1194 rcu_read_lock();
1195 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1196 /* Never send packets back to the socket
1197 * they originated from - MvS (miquels@drinkel.ow.org)
1199 if ((ptype->dev == dev || !ptype->dev) &&
1200 (ptype->af_packet_priv == NULL ||
1201 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1202 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1203 if (!skb2)
1204 break;
1206 /* skb->nh should be correctly
1207 set by sender, so that the second statement is
1208 just protection against buggy protocols.
1210 skb_reset_mac_header(skb2);
1212 if (skb_network_header(skb2) < skb2->data ||
1213 skb2->network_header > skb2->tail) {
1214 if (net_ratelimit())
1215 printk(KERN_CRIT "protocol %04x is "
1216 "buggy, dev %s\n",
1217 skb2->protocol, dev->name);
1218 skb_reset_network_header(skb2);
1221 skb2->transport_header = skb2->network_header;
1222 skb2->pkt_type = PACKET_OUTGOING;
1223 ptype->func(skb2, skb->dev, ptype, skb->dev);
1226 rcu_read_unlock();
1230 void __netif_schedule(struct net_device *dev)
1232 if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
1233 unsigned long flags;
1234 struct softnet_data *sd;
1236 local_irq_save(flags);
1237 sd = &__get_cpu_var(softnet_data);
1238 dev->next_sched = sd->output_queue;
1239 sd->output_queue = dev;
1240 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1241 local_irq_restore(flags);
1244 EXPORT_SYMBOL(__netif_schedule);
1246 void dev_kfree_skb_irq(struct sk_buff *skb)
1248 if (atomic_dec_and_test(&skb->users)) {
1249 struct softnet_data *sd;
1250 unsigned long flags;
1252 local_irq_save(flags);
1253 sd = &__get_cpu_var(softnet_data);
1254 skb->next = sd->completion_queue;
1255 sd->completion_queue = skb;
1256 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1257 local_irq_restore(flags);
1260 EXPORT_SYMBOL(dev_kfree_skb_irq);
1262 void dev_kfree_skb_any(struct sk_buff *skb)
1264 if (in_irq() || irqs_disabled())
1265 dev_kfree_skb_irq(skb);
1266 else
1267 dev_kfree_skb(skb);
1269 EXPORT_SYMBOL(dev_kfree_skb_any);
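/*
 * Example (hypothetical usage sketch): a driver's TX-completion handler runs
 * in hard-IRQ context and therefore must not call dev_kfree_skb() directly.
 *
 *	dev_kfree_skb_irq(skb);		from the TX-completion interrupt
 *	dev_kfree_skb_any(skb);		from code that may run in either context
 */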
1273 * netif_device_detach - mark device as removed
1274 * @dev: network device
1276 * Mark device as removed from system and therefore no longer available.
1278 void netif_device_detach(struct net_device *dev)
1280 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1281 netif_running(dev)) {
1282 netif_stop_queue(dev);
1285 EXPORT_SYMBOL(netif_device_detach);
1288 * netif_device_attach - mark device as attached
1289 * @dev: network device
 1291 * Mark device as attached to the system and restart if needed.
1293 void netif_device_attach(struct net_device *dev)
1295 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1296 netif_running(dev)) {
1297 netif_wake_queue(dev);
1298 __netdev_watchdog_up(dev);
1301 EXPORT_SYMBOL(netif_device_attach);
1305 * Invalidate hardware checksum when packet is to be mangled, and
1306 * complete checksum manually on outgoing path.
1308 int skb_checksum_help(struct sk_buff *skb)
1310 __wsum csum;
1311 int ret = 0, offset;
1313 if (skb->ip_summed == CHECKSUM_COMPLETE)
1314 goto out_set_summed;
1316 if (unlikely(skb_shinfo(skb)->gso_size)) {
1317 /* Let GSO fix up the checksum. */
1318 goto out_set_summed;
1321 if (skb_cloned(skb)) {
1322 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1323 if (ret)
1324 goto out;
1327 offset = skb->csum_start - skb_headroom(skb);
1328 BUG_ON(offset > (int)skb->len);
1329 csum = skb_checksum(skb, offset, skb->len-offset, 0);
1331 offset = skb_headlen(skb) - offset;
1332 BUG_ON(offset <= 0);
1333 BUG_ON(skb->csum_offset + 2 > offset);
1335 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
1336 csum_fold(csum);
1337 out_set_summed:
1338 skb->ip_summed = CHECKSUM_NONE;
1339 out:
1340 return ret;
1344 * skb_gso_segment - Perform segmentation on skb.
1345 * @skb: buffer to segment
1346 * @features: features for the output path (see dev->features)
1348 * This function segments the given skb and returns a list of segments.
1350 * It may return NULL if the skb requires no segmentation. This is
1351 * only possible when GSO is used for verifying header integrity.
1353 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1355 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1356 struct packet_type *ptype;
1357 __be16 type = skb->protocol;
1358 int err;
1360 BUG_ON(skb_shinfo(skb)->frag_list);
1362 skb_reset_mac_header(skb);
1363 skb->mac_len = skb->network_header - skb->mac_header;
1364 __skb_pull(skb, skb->mac_len);
1366 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
1367 if (skb_header_cloned(skb) &&
1368 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1369 return ERR_PTR(err);
1372 rcu_read_lock();
1373 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
1374 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1375 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1376 err = ptype->gso_send_check(skb);
1377 segs = ERR_PTR(err);
1378 if (err || skb_gso_ok(skb, features))
1379 break;
1380 __skb_push(skb, (skb->data -
1381 skb_network_header(skb)));
1383 segs = ptype->gso_segment(skb, features);
1384 break;
1387 rcu_read_unlock();
1389 __skb_push(skb, skb->data - skb_mac_header(skb));
1391 return segs;
1394 EXPORT_SYMBOL(skb_gso_segment);
1396 /* Take action when hardware reception checksum errors are detected. */
1397 #ifdef CONFIG_BUG
1398 void netdev_rx_csum_fault(struct net_device *dev)
1400 if (net_ratelimit()) {
1401 printk(KERN_ERR "%s: hw csum failure.\n",
1402 dev ? dev->name : "<unknown>");
1403 dump_stack();
1406 EXPORT_SYMBOL(netdev_rx_csum_fault);
1407 #endif
1409 /* Actually, we should eliminate this check as soon as we know, that:
 1410 * 1. IOMMU is present and allows mapping all the memory.
1411 * 2. No high memory really exists on this machine.
1414 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1416 #ifdef CONFIG_HIGHMEM
1417 int i;
1419 if (dev->features & NETIF_F_HIGHDMA)
1420 return 0;
1422 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1423 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1424 return 1;
1426 #endif
1427 return 0;
1430 struct dev_gso_cb {
1431 void (*destructor)(struct sk_buff *skb);
1434 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1436 static void dev_gso_skb_destructor(struct sk_buff *skb)
1438 struct dev_gso_cb *cb;
1440 do {
1441 struct sk_buff *nskb = skb->next;
1443 skb->next = nskb->next;
1444 nskb->next = NULL;
1445 kfree_skb(nskb);
1446 } while (skb->next);
1448 cb = DEV_GSO_CB(skb);
1449 if (cb->destructor)
1450 cb->destructor(skb);
1454 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1455 * @skb: buffer to segment
1457 * This function segments the given skb and stores the list of segments
1458 * in skb->next.
1460 static int dev_gso_segment(struct sk_buff *skb)
1462 struct net_device *dev = skb->dev;
1463 struct sk_buff *segs;
1464 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1465 NETIF_F_SG : 0);
1467 segs = skb_gso_segment(skb, features);
1469 /* Verifying header integrity only. */
1470 if (!segs)
1471 return 0;
1473 if (unlikely(IS_ERR(segs)))
1474 return PTR_ERR(segs);
1476 skb->next = segs;
1477 DEV_GSO_CB(skb)->destructor = skb->destructor;
1478 skb->destructor = dev_gso_skb_destructor;
1480 return 0;
1483 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1485 if (likely(!skb->next)) {
1486 if (!list_empty(&ptype_all))
1487 dev_queue_xmit_nit(skb, dev);
1489 if (netif_needs_gso(dev, skb)) {
1490 if (unlikely(dev_gso_segment(skb)))
1491 goto out_kfree_skb;
1492 if (skb->next)
1493 goto gso;
1496 return dev->hard_start_xmit(skb, dev);
1499 gso:
1500 do {
1501 struct sk_buff *nskb = skb->next;
1502 int rc;
1504 skb->next = nskb->next;
1505 nskb->next = NULL;
1506 rc = dev->hard_start_xmit(nskb, dev);
1507 if (unlikely(rc)) {
1508 nskb->next = skb->next;
1509 skb->next = nskb;
1510 return rc;
1512 if (unlikely((netif_queue_stopped(dev) ||
1513 netif_subqueue_stopped(dev, skb->queue_mapping)) &&
1514 skb->next))
1515 return NETDEV_TX_BUSY;
1516 } while (skb->next);
1518 skb->destructor = DEV_GSO_CB(skb)->destructor;
1520 out_kfree_skb:
1521 kfree_skb(skb);
1522 return 0;
1525 #define HARD_TX_LOCK(dev, cpu) { \
1526 if ((dev->features & NETIF_F_LLTX) == 0) { \
1527 netif_tx_lock(dev); \
1531 #define HARD_TX_UNLOCK(dev) { \
1532 if ((dev->features & NETIF_F_LLTX) == 0) { \
1533 netif_tx_unlock(dev); \
1538 * dev_queue_xmit - transmit a buffer
1539 * @skb: buffer to transmit
1541 * Queue a buffer for transmission to a network device. The caller must
1542 * have set the device and priority and built the buffer before calling
1543 * this function. The function can be called from an interrupt.
1545 * A negative errno code is returned on a failure. A success does not
1546 * guarantee the frame will be transmitted as it may be dropped due
1547 * to congestion or traffic shaping.
1549 * -----------------------------------------------------------------------------------
1550 * I notice this method can also return errors from the queue disciplines,
1551 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1552 * be positive.
1554 * Regardless of the return value, the skb is consumed, so it is currently
1555 * difficult to retry a send to this method. (You can bump the ref count
1556 * before sending to hold a reference for retry if you are careful.)
1558 * When calling this method, interrupts MUST be enabled. This is because
1559 * the BH enable code must have IRQs enabled so that it will not deadlock.
1560 * --BLG
1563 int dev_queue_xmit(struct sk_buff *skb)
1565 struct net_device *dev = skb->dev;
1566 struct Qdisc *q;
1567 int rc = -ENOMEM;
1569 /* GSO will handle the following emulations directly. */
1570 if (netif_needs_gso(dev, skb))
1571 goto gso;
1573 if (skb_shinfo(skb)->frag_list &&
1574 !(dev->features & NETIF_F_FRAGLIST) &&
1575 __skb_linearize(skb))
1576 goto out_kfree_skb;
1578 /* Fragmented skb is linearized if device does not support SG,
1579 * or if at least one of fragments is in highmem and device
1580 * does not support DMA from it.
1582 if (skb_shinfo(skb)->nr_frags &&
1583 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1584 __skb_linearize(skb))
1585 goto out_kfree_skb;
1587 /* If packet is not checksummed and device does not support
1588 * checksumming for this protocol, complete checksumming here.
1590 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1591 skb_set_transport_header(skb, skb->csum_start -
1592 skb_headroom(skb));
1594 if (!(dev->features & NETIF_F_GEN_CSUM) &&
1595 !((dev->features & NETIF_F_IP_CSUM) &&
1596 skb->protocol == htons(ETH_P_IP)) &&
1597 !((dev->features & NETIF_F_IPV6_CSUM) &&
1598 skb->protocol == htons(ETH_P_IPV6)))
1599 if (skb_checksum_help(skb))
1600 goto out_kfree_skb;
1603 gso:
1604 spin_lock_prefetch(&dev->queue_lock);
1606 /* Disable soft irqs for various locks below. Also
1607 * stops preemption for RCU.
1609 rcu_read_lock_bh();
1611 /* Updates of qdisc are serialized by queue_lock.
1612 * The struct Qdisc which is pointed to by qdisc is now a
1613 * rcu structure - it may be accessed without acquiring
1614 * a lock (but the structure may be stale.) The freeing of the
1615 * qdisc will be deferred until it's known that there are no
1616 * more references to it.
1618 * If the qdisc has an enqueue function, we still need to
1619 * hold the queue_lock before calling it, since queue_lock
1620 * also serializes access to the device queue.
1623 q = rcu_dereference(dev->qdisc);
1624 #ifdef CONFIG_NET_CLS_ACT
1625 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1626 #endif
1627 if (q->enqueue) {
1628 /* Grab device queue */
1629 spin_lock(&dev->queue_lock);
1630 q = dev->qdisc;
1631 if (q->enqueue) {
1632 /* reset queue_mapping to zero */
1633 skb->queue_mapping = 0;
1634 rc = q->enqueue(skb, q);
1635 qdisc_run(dev);
1636 spin_unlock(&dev->queue_lock);
1638 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1639 goto out;
1641 spin_unlock(&dev->queue_lock);
1644 /* The device has no queue. Common case for software devices:
1645 loopback, all the sorts of tunnels...
1647 Really, it is unlikely that netif_tx_lock protection is necessary
1648 here. (f.e. loopback and IP tunnels are clean ignoring statistics
1649 counters.)
1650 However, it is possible, that they rely on protection
1651 made by us here.
 1653 Check this and take the lock; it is not prone to deadlocks.
 1654 Either take the noqueue qdisc, it is even simpler 8)
1656 if (dev->flags & IFF_UP) {
1657 int cpu = smp_processor_id(); /* ok because BHs are off */
1659 if (dev->xmit_lock_owner != cpu) {
1661 HARD_TX_LOCK(dev, cpu);
1663 if (!netif_queue_stopped(dev) &&
1664 !netif_subqueue_stopped(dev, skb->queue_mapping)) {
1665 rc = 0;
1666 if (!dev_hard_start_xmit(skb, dev)) {
1667 HARD_TX_UNLOCK(dev);
1668 goto out;
1671 HARD_TX_UNLOCK(dev);
1672 if (net_ratelimit())
1673 printk(KERN_CRIT "Virtual device %s asks to "
1674 "queue packet!\n", dev->name);
1675 } else {
1676 /* Recursion is detected! It is possible,
1677 * unfortunately */
1678 if (net_ratelimit())
1679 printk(KERN_CRIT "Dead loop on virtual device "
1680 "%s, fix it urgently!\n", dev->name);
1684 rc = -ENETDOWN;
1685 rcu_read_unlock_bh();
1687 out_kfree_skb:
1688 kfree_skb(skb);
1689 return rc;
1690 out:
1691 rcu_read_unlock_bh();
1692 return rc;
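/*
 * Example (hypothetical usage sketch): transmitting a frame that the caller
 * has already built; the device pointer and ETH_P_IP protocol value are
 * illustrative.  Note that the skb is consumed whatever the return value.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	rc = dev_queue_xmit(skb);
 */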
1696 /*=======================================================================
1697 Receiver routines
1698 =======================================================================*/
1700 int netdev_max_backlog __read_mostly = 1000;
1701 int netdev_budget __read_mostly = 300;
1702 int weight_p __read_mostly = 64; /* old backlog weight */
1704 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1708 * netif_rx - post buffer to the network code
1709 * @skb: buffer to post
1711 * This function receives a packet from a device driver and queues it for
1712 * the upper (protocol) levels to process. It always succeeds. The buffer
1713 * may be dropped during processing for congestion control or by the
1714 * protocol layers.
1716 * return values:
1717 * NET_RX_SUCCESS (no congestion)
1718 * NET_RX_CN_LOW (low congestion)
1719 * NET_RX_CN_MOD (moderate congestion)
1720 * NET_RX_CN_HIGH (high congestion)
1721 * NET_RX_DROP (packet was dropped)
1725 int netif_rx(struct sk_buff *skb)
1727 struct softnet_data *queue;
1728 unsigned long flags;
1730 /* if netpoll wants it, pretend we never saw it */
1731 if (netpoll_rx(skb))
1732 return NET_RX_DROP;
1734 if (!skb->tstamp.tv64)
1735 net_timestamp(skb);
 1738 * The code is rearranged so that the path is the
 1739 * shortest when the CPU is congested, but it is still operating.
1741 local_irq_save(flags);
1742 queue = &__get_cpu_var(softnet_data);
1744 __get_cpu_var(netdev_rx_stat).total++;
1745 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1746 if (queue->input_pkt_queue.qlen) {
1747 enqueue:
1748 dev_hold(skb->dev);
1749 __skb_queue_tail(&queue->input_pkt_queue, skb);
1750 local_irq_restore(flags);
1751 return NET_RX_SUCCESS;
1754 napi_schedule(&queue->backlog);
1755 goto enqueue;
1758 __get_cpu_var(netdev_rx_stat).dropped++;
1759 local_irq_restore(flags);
1761 kfree_skb(skb);
1762 return NET_RX_DROP;
1765 int netif_rx_ni(struct sk_buff *skb)
1767 int err;
1769 preempt_disable();
1770 err = netif_rx(skb);
1771 if (local_softirq_pending())
1772 do_softirq();
1773 preempt_enable();
1775 return err;
1778 EXPORT_SYMBOL(netif_rx_ni);
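/*
 * Example (hypothetical usage sketch): a non-NAPI driver hands a received
 * frame to the stack from its interrupt handler.  eth_type_trans() is the
 * usual helper for Ethernet devices; use netif_rx_ni() instead when calling
 * from process context.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */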
1780 static inline struct net_device *skb_bond(struct sk_buff *skb)
1782 struct net_device *dev = skb->dev;
1784 if (dev->master) {
1785 if (skb_bond_should_drop(skb)) {
1786 kfree_skb(skb);
1787 return NULL;
1789 skb->dev = dev->master;
1792 return dev;
1796 static void net_tx_action(struct softirq_action *h)
1798 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1800 if (sd->completion_queue) {
1801 struct sk_buff *clist;
1803 local_irq_disable();
1804 clist = sd->completion_queue;
1805 sd->completion_queue = NULL;
1806 local_irq_enable();
1808 while (clist) {
1809 struct sk_buff *skb = clist;
1810 clist = clist->next;
1812 BUG_TRAP(!atomic_read(&skb->users));
1813 __kfree_skb(skb);
1817 if (sd->output_queue) {
1818 struct net_device *head;
1820 local_irq_disable();
1821 head = sd->output_queue;
1822 sd->output_queue = NULL;
1823 local_irq_enable();
1825 while (head) {
1826 struct net_device *dev = head;
1827 head = head->next_sched;
1829 smp_mb__before_clear_bit();
1830 clear_bit(__LINK_STATE_SCHED, &dev->state);
1832 if (spin_trylock(&dev->queue_lock)) {
1833 qdisc_run(dev);
1834 spin_unlock(&dev->queue_lock);
1835 } else {
1836 netif_schedule(dev);
1842 static inline int deliver_skb(struct sk_buff *skb,
1843 struct packet_type *pt_prev,
1844 struct net_device *orig_dev)
1846 atomic_inc(&skb->users);
1847 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1850 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
1851 /* These hooks defined here for ATM */
1852 struct net_bridge;
1853 struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1854 unsigned char *addr);
1855 void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
1858 * If bridge module is loaded call bridging hook.
1859 * returns NULL if packet was consumed.
1861 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
1862 struct sk_buff *skb) __read_mostly;
1863 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
1864 struct packet_type **pt_prev, int *ret,
1865 struct net_device *orig_dev)
1867 struct net_bridge_port *port;
1869 if (skb->pkt_type == PACKET_LOOPBACK ||
1870 (port = rcu_dereference(skb->dev->br_port)) == NULL)
1871 return skb;
1873 if (*pt_prev) {
1874 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1875 *pt_prev = NULL;
1878 return br_handle_frame_hook(port, skb);
1880 #else
1881 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
1882 #endif
1884 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
1885 struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
1886 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
1888 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
1889 struct packet_type **pt_prev,
1890 int *ret,
1891 struct net_device *orig_dev)
1893 if (skb->dev->macvlan_port == NULL)
1894 return skb;
1896 if (*pt_prev) {
1897 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1898 *pt_prev = NULL;
1900 return macvlan_handle_frame_hook(skb);
1902 #else
1903 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
1904 #endif
1906 #ifdef CONFIG_NET_CLS_ACT
 1907 /* TODO: Maybe we should just force sch_ingress to be compiled in
 1908 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay some useless instructions,
 1909 * a compare and 2 stores extra, right now if we don't have it on
 1910 * but do have CONFIG_NET_CLS_ACT.
 1911 * NOTE: This doesn't stop any functionality; if you don't have
 1912 * the ingress scheduler, you just can't add policies on ingress.
1915 static int ing_filter(struct sk_buff *skb)
1917 struct Qdisc *q;
1918 struct net_device *dev = skb->dev;
1919 int result = TC_ACT_OK;
1921 if (dev->qdisc_ingress) {
1922 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1923 if (MAX_RED_LOOP < ttl++) {
1924 printk(KERN_WARNING "Redir loop detected Dropping packet (%d->%d)\n",
1925 skb->iif, skb->dev->ifindex);
1926 return TC_ACT_SHOT;
1929 skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
1931 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
1933 spin_lock(&dev->ingress_lock);
1934 if ((q = dev->qdisc_ingress) != NULL)
1935 result = q->enqueue(skb, q);
1936 spin_unlock(&dev->ingress_lock);
1940 return result;
1942 #endif
1944 int netif_receive_skb(struct sk_buff *skb)
1946 struct packet_type *ptype, *pt_prev;
1947 struct net_device *orig_dev;
1948 int ret = NET_RX_DROP;
1949 __be16 type;
1951 /* if we've gotten here through NAPI, check netpoll */
1952 if (netpoll_receive_skb(skb))
1953 return NET_RX_DROP;
1955 if (!skb->tstamp.tv64)
1956 net_timestamp(skb);
1958 if (!skb->iif)
1959 skb->iif = skb->dev->ifindex;
1961 orig_dev = skb_bond(skb);
1963 if (!orig_dev)
1964 return NET_RX_DROP;
1966 __get_cpu_var(netdev_rx_stat).total++;
1968 skb_reset_network_header(skb);
1969 skb_reset_transport_header(skb);
1970 skb->mac_len = skb->network_header - skb->mac_header;
1972 pt_prev = NULL;
1974 rcu_read_lock();
1976 #ifdef CONFIG_NET_CLS_ACT
1977 if (skb->tc_verd & TC_NCLS) {
1978 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
1979 goto ncls;
1981 #endif
1983 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1984 if (!ptype->dev || ptype->dev == skb->dev) {
1985 if (pt_prev)
1986 ret = deliver_skb(skb, pt_prev, orig_dev);
1987 pt_prev = ptype;
1991 #ifdef CONFIG_NET_CLS_ACT
1992 if (pt_prev) {
1993 ret = deliver_skb(skb, pt_prev, orig_dev);
 1994 pt_prev = NULL; /* no one else should process this after */
1995 } else {
1996 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1999 ret = ing_filter(skb);
2001 if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
2002 kfree_skb(skb);
2003 goto out;
2006 skb->tc_verd = 0;
2007 ncls:
2008 #endif
2010 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2011 if (!skb)
2012 goto out;
2013 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2014 if (!skb)
2015 goto out;
2017 type = skb->protocol;
2018 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
2019 if (ptype->type == type &&
2020 (!ptype->dev || ptype->dev == skb->dev)) {
2021 if (pt_prev)
2022 ret = deliver_skb(skb, pt_prev, orig_dev);
2023 pt_prev = ptype;
2027 if (pt_prev) {
2028 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2029 } else {
2030 kfree_skb(skb);
 2031 /* Jamal, now you will not be able to escape explaining
 2032 * to me how you were going to use this. :-)
2034 ret = NET_RX_DROP;
2037 out:
2038 rcu_read_unlock();
2039 return ret;
2042 static int process_backlog(struct napi_struct *napi, int quota)
2044 int work = 0;
2045 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2046 unsigned long start_time = jiffies;
2048 napi->weight = weight_p;
2049 do {
2050 struct sk_buff *skb;
2051 struct net_device *dev;
2053 local_irq_disable();
2054 skb = __skb_dequeue(&queue->input_pkt_queue);
2055 if (!skb) {
2056 __napi_complete(napi);
2057 local_irq_enable();
2058 break;
2061 local_irq_enable();
2063 dev = skb->dev;
2065 netif_receive_skb(skb);
2067 dev_put(dev);
2068 } while (++work < quota && jiffies == start_time);
2070 return work;
2074 * __napi_schedule - schedule for receive
2075 * @napi: entry to schedule
2077 * The entry's receive function will be scheduled to run
2079 void fastcall __napi_schedule(struct napi_struct *n)
2081 unsigned long flags;
2083 local_irq_save(flags);
2084 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2085 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2086 local_irq_restore(flags);
2088 EXPORT_SYMBOL(__napi_schedule);
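/*
 * Example (hypothetical usage sketch): the driver side of the NAPI contract.
 * The names my_poll, my_hw_rx, my_hw_enable_irq and my_hw_disable_irq are
 * placeholders; see net_rx_action() below for how the weight is enforced.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = my_hw_rx(napi, budget);	feeds netif_receive_skb()
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			my_hw_enable_irq();
 *		}
 *		return work;
 *	}
 *
 * and in the interrupt handler:
 *
 *	my_hw_disable_irq();
 *	napi_schedule(napi);
 *
 * The napi_struct itself would have been set up earlier with
 * netif_napi_add(dev, napi, my_poll, 64).
 */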
2091 static void net_rx_action(struct softirq_action *h)
2093 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2094 unsigned long start_time = jiffies;
2095 int budget = netdev_budget;
2096 void *have;
2098 local_irq_disable();
2100 while (!list_empty(list)) {
2101 struct napi_struct *n;
2102 int work, weight;
 2104 /* If softirq window is exhausted then punt.
2106 * Note that this is a slight policy change from the
2107 * previous NAPI code, which would allow up to 2
2108 * jiffies to pass before breaking out. The test
2109 * used to be "jiffies - start_time > 1".
2111 if (unlikely(budget <= 0 || jiffies != start_time))
2112 goto softnet_break;
2114 local_irq_enable();
2116 /* Even though interrupts have been re-enabled, this
2117 * access is safe because interrupts can only add new
2118 * entries to the tail of this list, and only ->poll()
2119 * calls can remove this head entry from the list.
2121 n = list_entry(list->next, struct napi_struct, poll_list);
2123 have = netpoll_poll_lock(n);
2125 weight = n->weight;
2127 work = n->poll(n, weight);
2129 WARN_ON_ONCE(work > weight);
2131 budget -= work;
2133 local_irq_disable();
2135 /* Drivers must not modify the NAPI state if they
2136 * consume the entire weight. In such cases this code
2137 * still "owns" the NAPI instance and therefore can
2138 * move the instance around on the list at-will.
2140 if (unlikely(work == weight))
2141 list_move_tail(&n->poll_list, list);
2143 netpoll_poll_unlock(have);
2145 out:
2146 local_irq_enable();
2148 #ifdef CONFIG_NET_DMA
2150 * There may not be any more sk_buffs coming right now, so push
2151 * any pending DMA copies to hardware
2153 if (!cpus_empty(net_dma.channel_mask)) {
2154 int chan_idx;
2155 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2156 struct dma_chan *chan = net_dma.channels[chan_idx];
2157 if (chan)
2158 dma_async_memcpy_issue_pending(chan);
2161 #endif
2163 return;
2165 softnet_break:
2166 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2167 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2168 goto out;
2171 static gifconf_func_t * gifconf_list [NPROTO];
2174 * register_gifconf - register a SIOCGIF handler
2175 * @family: Address family
2176 * @gifconf: Function handler
2178 * Register protocol dependent address dumping routines. The handler
2179 * that is passed must not be freed or reused until it has been replaced
2180 * by another handler.
2182 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2184 if (family >= NPROTO)
2185 return -EINVAL;
2186 gifconf_list[family] = gifconf;
2187 return 0;
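/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A protocol registers its SIOCGIFCONF helper once at init time; the IPv4
 * stack does essentially this with its inet_gifconf() routine.  The
 * my_gifconf()/my_proto_init() names are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static int my_gifconf(struct net_device *dev, char __user *buf, int len)
{
	/* with buf == NULL, only report how much space would be needed */
	return 0;
}

static int __init my_proto_init(void)
{
	return register_gifconf(PF_INET, my_gifconf);
}
#endif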
2192 * Map an interface index to its name (SIOCGIFNAME)
2196 * We need this ioctl for efficient implementation of the
2197 * if_indextoname() function required by the IPv6 API. Without
2198 * it, we would have to search all the interfaces to find a
2199 * match. --pb
2202 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2204 struct net_device *dev;
2205 struct ifreq ifr;
2208 * Fetch the caller's info block.
2211 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2212 return -EFAULT;
2214 read_lock(&dev_base_lock);
2215 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2216 if (!dev) {
2217 read_unlock(&dev_base_lock);
2218 return -ENODEV;
2221 strcpy(ifr.ifr_name, dev->name);
2222 read_unlock(&dev_base_lock);
2224 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2225 return -EFAULT;
2226 return 0;
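/*
 * Editor's note: an illustrative user-space sketch, not part of the original
 * source, of the lookup this ioctl makes cheap; glibc's if_indextoname()
 * issues the same SIOCGIFNAME request.
 */
#if 0	/* illustrative only, never compiled */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static int print_ifname(int ifindex)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("ifindex %d is %s\n", ifindex, ifr.ifr_name);
	close(fd);
	return 0;
}
#endif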
2230 * Perform a SIOCGIFCONF call. This structure will change
2231 * size eventually, and there is nothing I can do about it.
2232 * Thus we will need a 'compatibility mode'.
2235 static int dev_ifconf(struct net *net, char __user *arg)
2237 struct ifconf ifc;
2238 struct net_device *dev;
2239 char __user *pos;
2240 int len;
2241 int total;
2242 int i;
2245 * Fetch the caller's info block.
2248 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2249 return -EFAULT;
2251 pos = ifc.ifc_buf;
2252 len = ifc.ifc_len;
2255 * Loop over the interfaces, and write an info block for each.
2258 total = 0;
2259 for_each_netdev(net, dev) {
2260 for (i = 0; i < NPROTO; i++) {
2261 if (gifconf_list[i]) {
2262 int done;
2263 if (!pos)
2264 done = gifconf_list[i](dev, NULL, 0);
2265 else
2266 done = gifconf_list[i](dev, pos + total,
2267 len - total);
2268 if (done < 0)
2269 return -EFAULT;
2270 total += done;
2276 * All done. Write the updated control block back to the caller.
2278 ifc.ifc_len = total;
2281 * Both BSD and Solaris return 0 here, so we do too.
2283 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
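/*
 * Editor's note: an illustrative user-space sketch, not part of the original
 * source, of a SIOCGIFCONF call as handled above.  A first call with
 * ifc_buf == NULL only returns the required length in ifc_len, exactly as
 * the NULL-pos branch in dev_ifconf() computes it.
 */
#if 0	/* illustrative only, never compiled */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static void list_if_addresses(void)
{
	struct ifconf ifc;
	struct ifreq *ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, n;

	memset(&ifc, 0, sizeof(ifc));
	if (fd < 0 || ioctl(fd, SIOCGIFCONF, &ifc) < 0)	/* sizing call */
		goto out;
	ifc.ifc_buf = malloc(ifc.ifc_len);
	if (!ifc.ifc_buf || ioctl(fd, SIOCGIFCONF, &ifc) < 0)
		goto out;
	ifr = ifc.ifc_req;
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", ifr[i].ifr_name);
out:
	free(ifc.ifc_buf);
	if (fd >= 0)
		close(fd);
}
#endif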
2286 #ifdef CONFIG_PROC_FS
2288 * This is invoked by the /proc filesystem handler to display a device
2289 * in detail.
2291 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2293 struct net *net = seq->private;
2294 loff_t off;
2295 struct net_device *dev;
2297 read_lock(&dev_base_lock);
2298 if (!*pos)
2299 return SEQ_START_TOKEN;
2301 off = 1;
2302 for_each_netdev(net, dev)
2303 if (off++ == *pos)
2304 return dev;
2306 return NULL;
2309 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2311 struct net *net = seq->private;
2312 ++*pos;
2313 return v == SEQ_START_TOKEN ?
2314 first_net_device(net) : next_net_device((struct net_device *)v);
2317 void dev_seq_stop(struct seq_file *seq, void *v)
2319 read_unlock(&dev_base_lock);
2322 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2324 struct net_device_stats *stats = dev->get_stats(dev);
2326 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2327 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2328 dev->name, stats->rx_bytes, stats->rx_packets,
2329 stats->rx_errors,
2330 stats->rx_dropped + stats->rx_missed_errors,
2331 stats->rx_fifo_errors,
2332 stats->rx_length_errors + stats->rx_over_errors +
2333 stats->rx_crc_errors + stats->rx_frame_errors,
2334 stats->rx_compressed, stats->multicast,
2335 stats->tx_bytes, stats->tx_packets,
2336 stats->tx_errors, stats->tx_dropped,
2337 stats->tx_fifo_errors, stats->collisions,
2338 stats->tx_carrier_errors +
2339 stats->tx_aborted_errors +
2340 stats->tx_window_errors +
2341 stats->tx_heartbeat_errors,
2342 stats->tx_compressed);
2346 * Called from the PROCfs module. This now uses the new arbitrary sized
2347 * /proc/net interface to create /proc/net/dev
2349 static int dev_seq_show(struct seq_file *seq, void *v)
2351 if (v == SEQ_START_TOKEN)
2352 seq_puts(seq, "Inter-| Receive "
2353 " | Transmit\n"
2354 " face |bytes packets errs drop fifo frame "
2355 "compressed multicast|bytes packets errs "
2356 "drop fifo colls carrier compressed\n");
2357 else
2358 dev_seq_printf_stats(seq, v);
2359 return 0;
2362 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2364 struct netif_rx_stats *rc = NULL;
2366 while (*pos < NR_CPUS)
2367 if (cpu_online(*pos)) {
2368 rc = &per_cpu(netdev_rx_stat, *pos);
2369 break;
2370 } else
2371 ++*pos;
2372 return rc;
2375 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2377 return softnet_get_online(pos);
2380 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2382 ++*pos;
2383 return softnet_get_online(pos);
2386 static void softnet_seq_stop(struct seq_file *seq, void *v)
2390 static int softnet_seq_show(struct seq_file *seq, void *v)
2392 struct netif_rx_stats *s = v;
2394 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2395 s->total, s->dropped, s->time_squeeze, 0,
2396 0, 0, 0, 0, /* was fastroute */
2397 s->cpu_collision );
2398 return 0;
2401 static const struct seq_operations dev_seq_ops = {
2402 .start = dev_seq_start,
2403 .next = dev_seq_next,
2404 .stop = dev_seq_stop,
2405 .show = dev_seq_show,
2408 static int dev_seq_open(struct inode *inode, struct file *file)
2410 struct seq_file *seq;
2411 int res;
2412 res = seq_open(file, &dev_seq_ops);
2413 if (!res) {
2414 seq = file->private_data;
2415 seq->private = get_net(PROC_NET(inode));
2417 return res;
2420 static int dev_seq_release(struct inode *inode, struct file *file)
2422 struct seq_file *seq = file->private_data;
2423 struct net *net = seq->private;
2424 put_net(net);
2425 return seq_release(inode, file);
2428 static const struct file_operations dev_seq_fops = {
2429 .owner = THIS_MODULE,
2430 .open = dev_seq_open,
2431 .read = seq_read,
2432 .llseek = seq_lseek,
2433 .release = dev_seq_release,
2436 static const struct seq_operations softnet_seq_ops = {
2437 .start = softnet_seq_start,
2438 .next = softnet_seq_next,
2439 .stop = softnet_seq_stop,
2440 .show = softnet_seq_show,
2443 static int softnet_seq_open(struct inode *inode, struct file *file)
2445 return seq_open(file, &softnet_seq_ops);
2448 static const struct file_operations softnet_seq_fops = {
2449 .owner = THIS_MODULE,
2450 .open = softnet_seq_open,
2451 .read = seq_read,
2452 .llseek = seq_lseek,
2453 .release = seq_release,
2456 static void *ptype_get_idx(loff_t pos)
2458 struct packet_type *pt = NULL;
2459 loff_t i = 0;
2460 int t;
2462 list_for_each_entry_rcu(pt, &ptype_all, list) {
2463 if (i == pos)
2464 return pt;
2465 ++i;
2468 for (t = 0; t < 16; t++) {
2469 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2470 if (i == pos)
2471 return pt;
2472 ++i;
2475 return NULL;
2478 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2480 rcu_read_lock();
2481 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2484 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2486 struct packet_type *pt;
2487 struct list_head *nxt;
2488 int hash;
2490 ++*pos;
2491 if (v == SEQ_START_TOKEN)
2492 return ptype_get_idx(0);
2494 pt = v;
2495 nxt = pt->list.next;
2496 if (pt->type == htons(ETH_P_ALL)) {
2497 if (nxt != &ptype_all)
2498 goto found;
2499 hash = 0;
2500 nxt = ptype_base[0].next;
2501 } else
2502 hash = ntohs(pt->type) & 15;
2504 while (nxt == &ptype_base[hash]) {
2505 if (++hash >= 16)
2506 return NULL;
2507 nxt = ptype_base[hash].next;
2509 found:
2510 return list_entry(nxt, struct packet_type, list);
2513 static void ptype_seq_stop(struct seq_file *seq, void *v)
2515 rcu_read_unlock();
2518 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2520 #ifdef CONFIG_KALLSYMS
2521 unsigned long offset = 0, symsize;
2522 const char *symname;
2523 char *modname;
2524 char namebuf[128];
2526 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2527 &modname, namebuf);
2529 if (symname) {
2530 char *delim = ":";
2532 if (!modname)
2533 modname = delim = "";
2534 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2535 symname, offset);
2536 return;
2538 #endif
2540 seq_printf(seq, "[%p]", sym);
2543 static int ptype_seq_show(struct seq_file *seq, void *v)
2545 struct packet_type *pt = v;
2547 if (v == SEQ_START_TOKEN)
2548 seq_puts(seq, "Type Device Function\n");
2549 else {
2550 if (pt->type == htons(ETH_P_ALL))
2551 seq_puts(seq, "ALL ");
2552 else
2553 seq_printf(seq, "%04x", ntohs(pt->type));
2555 seq_printf(seq, " %-8s ",
2556 pt->dev ? pt->dev->name : "");
2557 ptype_seq_decode(seq, pt->func);
2558 seq_putc(seq, '\n');
2561 return 0;
2564 static const struct seq_operations ptype_seq_ops = {
2565 .start = ptype_seq_start,
2566 .next = ptype_seq_next,
2567 .stop = ptype_seq_stop,
2568 .show = ptype_seq_show,
2571 static int ptype_seq_open(struct inode *inode, struct file *file)
2573 return seq_open(file, &ptype_seq_ops);
2576 static const struct file_operations ptype_seq_fops = {
2577 .owner = THIS_MODULE,
2578 .open = ptype_seq_open,
2579 .read = seq_read,
2580 .llseek = seq_lseek,
2581 .release = seq_release,
2585 static int dev_proc_net_init(struct net *net)
2587 int rc = -ENOMEM;
2589 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2590 goto out;
2591 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2592 goto out_dev;
2593 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2594 goto out_softnet;
2596 if (wext_proc_init(net))
2597 goto out_ptype;
2598 rc = 0;
2599 out:
2600 return rc;
2601 out_ptype:
2602 proc_net_remove(net, "ptype");
2603 out_softnet:
2604 proc_net_remove(net, "softnet_stat");
2605 out_dev:
2606 proc_net_remove(net, "dev");
2607 goto out;
2610 static void dev_proc_net_exit(struct net *net)
2612 wext_proc_exit(net);
2614 proc_net_remove(net, "ptype");
2615 proc_net_remove(net, "softnet_stat");
2616 proc_net_remove(net, "dev");
2619 static struct pernet_operations dev_proc_ops = {
2620 .init = dev_proc_net_init,
2621 .exit = dev_proc_net_exit,
2624 static int __init dev_proc_init(void)
2626 return register_pernet_subsys(&dev_proc_ops);
2628 #else
2629 #define dev_proc_init() 0
2630 #endif /* CONFIG_PROC_FS */
2634 * netdev_set_master - set up master/slave pair
2635 * @slave: slave device
2636 * @master: new master device
2638 * Changes the master device of the slave. Pass %NULL to break the
2639 * bonding. The caller must hold the RTNL semaphore. On a failure
2640 * a negative errno code is returned. On success the reference counts
2641 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2642 * function returns zero.
2644 int netdev_set_master(struct net_device *slave, struct net_device *master)
2646 struct net_device *old = slave->master;
2648 ASSERT_RTNL();
2650 if (master) {
2651 if (old)
2652 return -EBUSY;
2653 dev_hold(master);
2656 slave->master = master;
2658 synchronize_net();
2660 if (old)
2661 dev_put(old);
2663 if (master)
2664 slave->flags |= IFF_SLAVE;
2665 else
2666 slave->flags &= ~IFF_SLAVE;
2668 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2669 return 0;
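/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A bonding-style driver pairs and unpairs devices roughly like this; both
 * calls must run under the RTNL lock, which netdev_set_master() asserts.
 * The my_enslave()/my_release() names are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static int my_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();
	err = netdev_set_master(slave_dev, bond_dev);
	if (err)
		return err;
	/* ... copy MAC/MTU, open the slave, start using it for TX ... */
	return 0;
}

static void my_release(struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_set_master(slave_dev, NULL);	/* break the pairing */
}
#endif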
2672 static void __dev_set_promiscuity(struct net_device *dev, int inc)
2674 unsigned short old_flags = dev->flags;
2676 ASSERT_RTNL();
2678 if ((dev->promiscuity += inc) == 0)
2679 dev->flags &= ~IFF_PROMISC;
2680 else
2681 dev->flags |= IFF_PROMISC;
2682 if (dev->flags != old_flags) {
2683 printk(KERN_INFO "device %s %s promiscuous mode\n",
2684 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2685 "left");
2686 audit_log(current->audit_context, GFP_ATOMIC,
2687 AUDIT_ANOM_PROMISCUOUS,
2688 "dev=%s prom=%d old_prom=%d auid=%u",
2689 dev->name, (dev->flags & IFF_PROMISC),
2690 (old_flags & IFF_PROMISC),
2691 audit_get_loginuid(current->audit_context));
2693 if (dev->change_rx_flags)
2694 dev->change_rx_flags(dev, IFF_PROMISC);
2699 * dev_set_promiscuity - update promiscuity count on a device
2700 * @dev: device
2701 * @inc: modifier
2703 * Add or remove promiscuity from a device. While the count in the device
2704 * remains above zero the interface remains promiscuous. Once it hits zero
2705 * the device reverts to normal filtering operation. A negative inc
2706 * value is used to drop promiscuity on the device.
2708 void dev_set_promiscuity(struct net_device *dev, int inc)
2710 unsigned short old_flags = dev->flags;
2712 __dev_set_promiscuity(dev, inc);
2713 if (dev->flags != old_flags)
2714 dev_set_rx_mode(dev);
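/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A capture-style user takes and later drops one promiscuity reference; the
 * RTNL lock must be held because __dev_set_promiscuity() asserts it.  The
 * my_capture_*() names are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void my_capture_start(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* counted, so users stack safely */
	rtnl_unlock();
}

static void my_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* drop our reference */
	rtnl_unlock();
}
#endif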
2718 * dev_set_allmulti - update allmulti count on a device
2719 * @dev: device
2720 * @inc: modifier
2722 * Add or remove reception of all multicast frames to a device. While the
2723 * count in the device remains above zero the interface remains listening
2724 * to all multicast frames. Once it hits zero the device reverts to normal
2725 * filtering operation. A negative @inc value is used to drop the counter
2726 * when releasing a resource needing all multicasts.
2729 void dev_set_allmulti(struct net_device *dev, int inc)
2731 unsigned short old_flags = dev->flags;
2733 ASSERT_RTNL();
2735 dev->flags |= IFF_ALLMULTI;
2736 if ((dev->allmulti += inc) == 0)
2737 dev->flags &= ~IFF_ALLMULTI;
2738 if (dev->flags ^ old_flags) {
2739 if (dev->change_rx_flags)
2740 dev->change_rx_flags(dev, IFF_ALLMULTI);
2741 dev_set_rx_mode(dev);
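/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * Multicast routing or VLAN-style code pins allmulti the same way, again
 * under the RTNL lock; my_mc_listen() is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void my_mc_listen(struct net_device *dev, int on)
{
	rtnl_lock();
	dev_set_allmulti(dev, on ? 1 : -1);
	rtnl_unlock();
}
#endif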
2746 * Upload unicast and multicast address lists to device and
2747 * configure RX filtering. When the device doesn't support unicast
2748 * filtering it is put in promiscuous mode while unicast addresses
2749 * are present.
2751 void __dev_set_rx_mode(struct net_device *dev)
2753 /* dev_open will call this function so the list will stay sane. */
2754 if (!(dev->flags&IFF_UP))
2755 return;
2757 if (!netif_device_present(dev))
2758 return;
2760 if (dev->set_rx_mode)
2761 dev->set_rx_mode(dev);
2762 else {
2763 /* Unicast addresses changes may only happen under the rtnl,
2764 * therefore calling __dev_set_promiscuity here is safe.
2766 if (dev->uc_count > 0 && !dev->uc_promisc) {
2767 __dev_set_promiscuity(dev, 1);
2768 dev->uc_promisc = 1;
2769 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2770 __dev_set_promiscuity(dev, -1);
2771 dev->uc_promisc = 0;
2774 if (dev->set_multicast_list)
2775 dev->set_multicast_list(dev);
2779 void dev_set_rx_mode(struct net_device *dev)
2781 netif_tx_lock_bh(dev);
2782 __dev_set_rx_mode(dev);
2783 netif_tx_unlock_bh(dev);
2786 int __dev_addr_delete(struct dev_addr_list **list, int *count,
2787 void *addr, int alen, int glbl)
2789 struct dev_addr_list *da;
2791 for (; (da = *list) != NULL; list = &da->next) {
2792 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2793 alen == da->da_addrlen) {
2794 if (glbl) {
2795 int old_glbl = da->da_gusers;
2796 da->da_gusers = 0;
2797 if (old_glbl == 0)
2798 break;
2800 if (--da->da_users)
2801 return 0;
2803 *list = da->next;
2804 kfree(da);
2805 (*count)--;
2806 return 0;
2809 return -ENOENT;
2812 int __dev_addr_add(struct dev_addr_list **list, int *count,
2813 void *addr, int alen, int glbl)
2815 struct dev_addr_list *da;
2817 for (da = *list; da != NULL; da = da->next) {
2818 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2819 da->da_addrlen == alen) {
2820 if (glbl) {
2821 int old_glbl = da->da_gusers;
2822 da->da_gusers = 1;
2823 if (old_glbl)
2824 return 0;
2826 da->da_users++;
2827 return 0;
2831 da = kmalloc(sizeof(*da), GFP_ATOMIC);
2832 if (da == NULL)
2833 return -ENOMEM;
2834 memcpy(da->da_addr, addr, alen);
2835 da->da_addrlen = alen;
2836 da->da_users = 1;
2837 da->da_gusers = glbl ? 1 : 0;
2838 da->next = *list;
2839 *list = da;
2840 (*count)++;
2841 return 0;
2845 * dev_unicast_delete - Release secondary unicast address.
2846 * @dev: device
2847 * @addr: address to delete
2848 * @alen: length of @addr
2850 * Release reference to a secondary unicast address and remove it
2851 * from the device if the reference count drops to zero.
2853 * The caller must hold the rtnl_mutex.
2855 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2857 int err;
2859 ASSERT_RTNL();
2861 netif_tx_lock_bh(dev);
2862 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2863 if (!err)
2864 __dev_set_rx_mode(dev);
2865 netif_tx_unlock_bh(dev);
2866 return err;
2868 EXPORT_SYMBOL(dev_unicast_delete);
2871 * dev_unicast_add - add a secondary unicast address
2872 * @dev: device
2873 * @addr: address to add
2874 * @alen: length of @addr
2876 * Add a secondary unicast address to the device or increase
2877 * the reference count if it already exists.
2879 * The caller must hold the rtnl_mutex.
2881 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2883 int err;
2885 ASSERT_RTNL();
2887 netif_tx_lock_bh(dev);
2888 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2889 if (!err)
2890 __dev_set_rx_mode(dev);
2891 netif_tx_unlock_bh(dev);
2892 return err;
2894 EXPORT_SYMBOL(dev_unicast_add);
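/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A macvlan-style upper device adds its own MAC as a secondary unicast
 * address on the lower device and removes it on teardown; the rtnl_mutex
 * must be held as documented above.  my_attach()/my_detach() are
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static int my_attach(struct net_device *upper, struct net_device *lower)
{
	ASSERT_RTNL();
	return dev_unicast_add(lower, upper->dev_addr, ETH_ALEN);
}

static void my_detach(struct net_device *upper, struct net_device *lower)
{
	ASSERT_RTNL();
	dev_unicast_delete(lower, upper->dev_addr, ETH_ALEN);
}
#endif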
2896 static void __dev_addr_discard(struct dev_addr_list **list)
2898 struct dev_addr_list *tmp;
2900 while (*list != NULL) {
2901 tmp = *list;
2902 *list = tmp->next;
2903 if (tmp->da_users > tmp->da_gusers)
2904 printk("__dev_addr_discard: address leakage! "
2905 "da_users=%d\n", tmp->da_users);
2906 kfree(tmp);
2910 static void dev_addr_discard(struct net_device *dev)
2912 netif_tx_lock_bh(dev);
2914 __dev_addr_discard(&dev->uc_list);
2915 dev->uc_count = 0;
2917 __dev_addr_discard(&dev->mc_list);
2918 dev->mc_count = 0;
2920 netif_tx_unlock_bh(dev);
2923 unsigned dev_get_flags(const struct net_device *dev)
2925 unsigned flags;
2927 flags = (dev->flags & ~(IFF_PROMISC |
2928 IFF_ALLMULTI |
2929 IFF_RUNNING |
2930 IFF_LOWER_UP |
2931 IFF_DORMANT)) |
2932 (dev->gflags & (IFF_PROMISC |
2933 IFF_ALLMULTI));
2935 if (netif_running(dev)) {
2936 if (netif_oper_up(dev))
2937 flags |= IFF_RUNNING;
2938 if (netif_carrier_ok(dev))
2939 flags |= IFF_LOWER_UP;
2940 if (netif_dormant(dev))
2941 flags |= IFF_DORMANT;
2944 return flags;
2947 int dev_change_flags(struct net_device *dev, unsigned flags)
2949 int ret, changes;
2950 int old_flags = dev->flags;
2952 ASSERT_RTNL();
2955 * Set the flags on our device.
2958 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2959 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2960 IFF_AUTOMEDIA)) |
2961 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2962 IFF_ALLMULTI));
2965 * Load in the correct multicast list now the flags have changed.
2968 if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
2969 dev->change_rx_flags(dev, IFF_MULTICAST);
2971 dev_set_rx_mode(dev);
2974 * Have we downed the interface. We handle IFF_UP ourselves
2975 * according to user attempts to set it, rather than blindly
2976 * setting it.
2979 ret = 0;
2980 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
2981 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2983 if (!ret)
2984 dev_set_rx_mode(dev);
2987 if (dev->flags & IFF_UP &&
2988 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2989 IFF_VOLATILE)))
2990 raw_notifier_call_chain(&netdev_chain,
2991 NETDEV_CHANGE, dev);
2993 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2994 int inc = (flags & IFF_PROMISC) ? +1 : -1;
2995 dev->gflags ^= IFF_PROMISC;
2996 dev_set_promiscuity(dev, inc);
2999 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3000 is important. Some (broken) drivers set IFF_PROMISC when
3001 IFF_ALLMULTI is requested, without asking us and without reporting it.
3003 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3004 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3005 dev->gflags ^= IFF_ALLMULTI;
3006 dev_set_allmulti(dev, inc);
3009 /* Exclude state transition flags, already notified */
3010 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3011 if (changes)
3012 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3014 return ret;
3017 int dev_set_mtu(struct net_device *dev, int new_mtu)
3019 int err;
3021 if (new_mtu == dev->mtu)
3022 return 0;
3024 /* MTU must be positive. */
3025 if (new_mtu < 0)
3026 return -EINVAL;
3028 if (!netif_device_present(dev))
3029 return -ENODEV;
3031 err = 0;
3032 if (dev->change_mtu)
3033 err = dev->change_mtu(dev, new_mtu);
3034 else
3035 dev->mtu = new_mtu;
3036 if (!err && dev->flags & IFF_UP)
3037 raw_notifier_call_chain(&netdev_chain,
3038 NETDEV_CHANGEMTU, dev);
3039 return err;
3042 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3044 int err;
3046 if (!dev->set_mac_address)
3047 return -EOPNOTSUPP;
3048 if (sa->sa_family != dev->type)
3049 return -EINVAL;
3050 if (!netif_device_present(dev))
3051 return -ENODEV;
3052 err = dev->set_mac_address(dev, sa);
3053 if (!err)
3054 raw_notifier_call_chain(&netdev_chain,
3055 NETDEV_CHANGEADDR, dev);
3056 return err;
3060 * Perform the SIOCxIFxxx calls.
3062 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3064 int err;
3065 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3067 if (!dev)
3068 return -ENODEV;
3070 switch (cmd) {
3071 case SIOCGIFFLAGS: /* Get interface flags */
3072 ifr->ifr_flags = dev_get_flags(dev);
3073 return 0;
3075 case SIOCSIFFLAGS: /* Set interface flags */
3076 return dev_change_flags(dev, ifr->ifr_flags);
3078 case SIOCGIFMETRIC: /* Get the metric on the interface
3079 (currently unused) */
3080 ifr->ifr_metric = 0;
3081 return 0;
3083 case SIOCSIFMETRIC: /* Set the metric on the interface
3084 (currently unused) */
3085 return -EOPNOTSUPP;
3087 case SIOCGIFMTU: /* Get the MTU of a device */
3088 ifr->ifr_mtu = dev->mtu;
3089 return 0;
3091 case SIOCSIFMTU: /* Set the MTU of a device */
3092 return dev_set_mtu(dev, ifr->ifr_mtu);
3094 case SIOCGIFHWADDR:
3095 if (!dev->addr_len)
3096 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3097 else
3098 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3099 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3100 ifr->ifr_hwaddr.sa_family = dev->type;
3101 return 0;
3103 case SIOCSIFHWADDR:
3104 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3106 case SIOCSIFHWBROADCAST:
3107 if (ifr->ifr_hwaddr.sa_family != dev->type)
3108 return -EINVAL;
3109 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3110 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3111 raw_notifier_call_chain(&netdev_chain,
3112 NETDEV_CHANGEADDR, dev);
3113 return 0;
3115 case SIOCGIFMAP:
3116 ifr->ifr_map.mem_start = dev->mem_start;
3117 ifr->ifr_map.mem_end = dev->mem_end;
3118 ifr->ifr_map.base_addr = dev->base_addr;
3119 ifr->ifr_map.irq = dev->irq;
3120 ifr->ifr_map.dma = dev->dma;
3121 ifr->ifr_map.port = dev->if_port;
3122 return 0;
3124 case SIOCSIFMAP:
3125 if (dev->set_config) {
3126 if (!netif_device_present(dev))
3127 return -ENODEV;
3128 return dev->set_config(dev, &ifr->ifr_map);
3130 return -EOPNOTSUPP;
3132 case SIOCADDMULTI:
3133 if (!dev->set_multicast_list ||
3134 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3135 return -EINVAL;
3136 if (!netif_device_present(dev))
3137 return -ENODEV;
3138 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3139 dev->addr_len, 1);
3141 case SIOCDELMULTI:
3142 if (!dev->set_multicast_list ||
3143 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3144 return -EINVAL;
3145 if (!netif_device_present(dev))
3146 return -ENODEV;
3147 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3148 dev->addr_len, 1);
3150 case SIOCGIFINDEX:
3151 ifr->ifr_ifindex = dev->ifindex;
3152 return 0;
3154 case SIOCGIFTXQLEN:
3155 ifr->ifr_qlen = dev->tx_queue_len;
3156 return 0;
3158 case SIOCSIFTXQLEN:
3159 if (ifr->ifr_qlen < 0)
3160 return -EINVAL;
3161 dev->tx_queue_len = ifr->ifr_qlen;
3162 return 0;
3164 case SIOCSIFNAME:
3165 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3166 return dev_change_name(dev, ifr->ifr_newname);
3169 * Unknown or private ioctl
3172 default:
3173 if ((cmd >= SIOCDEVPRIVATE &&
3174 cmd <= SIOCDEVPRIVATE + 15) ||
3175 cmd == SIOCBONDENSLAVE ||
3176 cmd == SIOCBONDRELEASE ||
3177 cmd == SIOCBONDSETHWADDR ||
3178 cmd == SIOCBONDSLAVEINFOQUERY ||
3179 cmd == SIOCBONDINFOQUERY ||
3180 cmd == SIOCBONDCHANGEACTIVE ||
3181 cmd == SIOCGMIIPHY ||
3182 cmd == SIOCGMIIREG ||
3183 cmd == SIOCSMIIREG ||
3184 cmd == SIOCBRADDIF ||
3185 cmd == SIOCBRDELIF ||
3186 cmd == SIOCWANDEV) {
3187 err = -EOPNOTSUPP;
3188 if (dev->do_ioctl) {
3189 if (netif_device_present(dev))
3190 err = dev->do_ioctl(dev, ifr,
3191 cmd);
3192 else
3193 err = -ENODEV;
3195 } else
3196 err = -EINVAL;
3199 return err;
3203 * This function handles all "interface"-type I/O control requests. The actual
3204 * 'doing' part of this is dev_ifsioc above.
3208 * dev_ioctl - network device ioctl
3209 * @cmd: command to issue
3210 * @arg: pointer to a struct ifreq in user space
3212 * Issue ioctl functions to devices. This is normally called by the
3213 * user space syscall interfaces but can sometimes be useful for
3214 * other purposes. The return value is the return from the syscall if
3215 * positive or a negative errno code on error.
3218 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3220 struct ifreq ifr;
3221 int ret;
3222 char *colon;
3224 /* One special case: SIOCGIFCONF takes ifconf argument
3225 and requires shared lock, because it sleeps writing
3226 to user space.
3229 if (cmd == SIOCGIFCONF) {
3230 rtnl_lock();
3231 ret = dev_ifconf(net, (char __user *) arg);
3232 rtnl_unlock();
3233 return ret;
3235 if (cmd == SIOCGIFNAME)
3236 return dev_ifname(net, (struct ifreq __user *)arg);
3238 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3239 return -EFAULT;
3241 ifr.ifr_name[IFNAMSIZ-1] = 0;
3243 colon = strchr(ifr.ifr_name, ':');
3244 if (colon)
3245 *colon = 0;
3248 * See which interface the caller is talking about.
3251 switch (cmd) {
3253 * These ioctl calls:
3254 * - can be done by all.
3255 * - atomic and do not require locking.
3256 * - return a value
3258 case SIOCGIFFLAGS:
3259 case SIOCGIFMETRIC:
3260 case SIOCGIFMTU:
3261 case SIOCGIFHWADDR:
3262 case SIOCGIFSLAVE:
3263 case SIOCGIFMAP:
3264 case SIOCGIFINDEX:
3265 case SIOCGIFTXQLEN:
3266 dev_load(net, ifr.ifr_name);
3267 read_lock(&dev_base_lock);
3268 ret = dev_ifsioc(net, &ifr, cmd);
3269 read_unlock(&dev_base_lock);
3270 if (!ret) {
3271 if (colon)
3272 *colon = ':';
3273 if (copy_to_user(arg, &ifr,
3274 sizeof(struct ifreq)))
3275 ret = -EFAULT;
3277 return ret;
3279 case SIOCETHTOOL:
3280 dev_load(net, ifr.ifr_name);
3281 rtnl_lock();
3282 ret = dev_ethtool(net, &ifr);
3283 rtnl_unlock();
3284 if (!ret) {
3285 if (colon)
3286 *colon = ':';
3287 if (copy_to_user(arg, &ifr,
3288 sizeof(struct ifreq)))
3289 ret = -EFAULT;
3291 return ret;
3294 * These ioctl calls:
3295 * - require superuser power.
3296 * - require strict serialization.
3297 * - return a value
3299 case SIOCGMIIPHY:
3300 case SIOCGMIIREG:
3301 case SIOCSIFNAME:
3302 if (!capable(CAP_NET_ADMIN))
3303 return -EPERM;
3304 dev_load(net, ifr.ifr_name);
3305 rtnl_lock();
3306 ret = dev_ifsioc(net, &ifr, cmd);
3307 rtnl_unlock();
3308 if (!ret) {
3309 if (colon)
3310 *colon = ':';
3311 if (copy_to_user(arg, &ifr,
3312 sizeof(struct ifreq)))
3313 ret = -EFAULT;
3315 return ret;
3318 * These ioctl calls:
3319 * - require superuser power.
3320 * - require strict serialization.
3321 * - do not return a value
3323 case SIOCSIFFLAGS:
3324 case SIOCSIFMETRIC:
3325 case SIOCSIFMTU:
3326 case SIOCSIFMAP:
3327 case SIOCSIFHWADDR:
3328 case SIOCSIFSLAVE:
3329 case SIOCADDMULTI:
3330 case SIOCDELMULTI:
3331 case SIOCSIFHWBROADCAST:
3332 case SIOCSIFTXQLEN:
3333 case SIOCSMIIREG:
3334 case SIOCBONDENSLAVE:
3335 case SIOCBONDRELEASE:
3336 case SIOCBONDSETHWADDR:
3337 case SIOCBONDCHANGEACTIVE:
3338 case SIOCBRADDIF:
3339 case SIOCBRDELIF:
3340 if (!capable(CAP_NET_ADMIN))
3341 return -EPERM;
3342 /* fall through */
3343 case SIOCBONDSLAVEINFOQUERY:
3344 case SIOCBONDINFOQUERY:
3345 dev_load(net, ifr.ifr_name);
3346 rtnl_lock();
3347 ret = dev_ifsioc(net, &ifr, cmd);
3348 rtnl_unlock();
3349 return ret;
3351 case SIOCGIFMEM:
3352 /* Get the per device memory space. We can add this but
3353 * currently do not support it */
3354 case SIOCSIFMEM:
3355 /* Set the per device memory buffer space.
3356 * Not applicable in our case */
3357 case SIOCSIFLINK:
3358 return -EINVAL;
3361 * Unknown or private ioctl.
3363 default:
3364 if (cmd == SIOCWANDEV ||
3365 (cmd >= SIOCDEVPRIVATE &&
3366 cmd <= SIOCDEVPRIVATE + 15)) {
3367 dev_load(net, ifr.ifr_name);
3368 rtnl_lock();
3369 ret = dev_ifsioc(net, &ifr, cmd);
3370 rtnl_unlock();
3371 if (!ret && copy_to_user(arg, &ifr,
3372 sizeof(struct ifreq)))
3373 ret = -EFAULT;
3374 return ret;
3376 /* Take care of Wireless Extensions */
3377 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3378 return wext_handle_ioctl(net, &ifr, cmd, arg);
3379 return -EINVAL;
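/*
 * Editor's note: an illustrative user-space sketch, not part of the original
 * source, of the per-interface requests dispatched above (SIOCGIFFLAGS and
 * SIOCGIFMTU against a hypothetical "eth0").
 */
#if 0	/* illustrative only, never compiled */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static void show_eth0(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
		printf("flags 0x%x, up=%d\n", ifr.ifr_flags,
		       !!(ifr.ifr_flags & IFF_UP));
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("mtu %d\n", ifr.ifr_mtu);
	close(fd);
}
#endif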
3385 * dev_new_index - allocate an ifindex
3387 * Returns a suitable unique value for a new device interface
3388 * number. The caller must hold the rtnl semaphore or the
3389 * dev_base_lock to be sure it remains unique.
3391 static int dev_new_index(struct net *net)
3393 static int ifindex;
3394 for (;;) {
3395 if (++ifindex <= 0)
3396 ifindex = 1;
3397 if (!__dev_get_by_index(net, ifindex))
3398 return ifindex;
3402 /* Delayed registration/unregisteration */
3403 static DEFINE_SPINLOCK(net_todo_list_lock);
3404 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
3406 static void net_set_todo(struct net_device *dev)
3408 spin_lock(&net_todo_list_lock);
3409 list_add_tail(&dev->todo_list, &net_todo_list);
3410 spin_unlock(&net_todo_list_lock);
3414 * register_netdevice - register a network device
3415 * @dev: device to register
3417 * Take a completed network device structure and add it to the kernel
3418 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3419 * chain. 0 is returned on success. A negative errno code is returned
3420 * on a failure to set up the device, or if the name is a duplicate.
3422 * Callers must hold the rtnl semaphore. You may want
3423 * register_netdev() instead of this.
3425 * BUGS:
3426 * The locking appears insufficient to guarantee two parallel registers
3427 * will not get the same name.
3430 int register_netdevice(struct net_device *dev)
3432 struct hlist_head *head;
3433 struct hlist_node *p;
3434 int ret;
3435 struct net *net;
3437 BUG_ON(dev_boot_phase);
3438 ASSERT_RTNL();
3440 might_sleep();
3442 /* When net_devices are persistent, this will be fatal. */
3443 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3444 BUG_ON(!dev->nd_net);
3445 net = dev->nd_net;
3447 spin_lock_init(&dev->queue_lock);
3448 spin_lock_init(&dev->_xmit_lock);
3449 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
3450 dev->xmit_lock_owner = -1;
3451 spin_lock_init(&dev->ingress_lock);
3453 dev->iflink = -1;
3455 /* Init, if this function is available */
3456 if (dev->init) {
3457 ret = dev->init(dev);
3458 if (ret) {
3459 if (ret > 0)
3460 ret = -EIO;
3461 goto out;
3465 if (!dev_valid_name(dev->name)) {
3466 ret = -EINVAL;
3467 goto err_uninit;
3470 dev->ifindex = dev_new_index(net);
3471 if (dev->iflink == -1)
3472 dev->iflink = dev->ifindex;
3474 /* Check for existence of name */
3475 head = dev_name_hash(net, dev->name);
3476 hlist_for_each(p, head) {
3477 struct net_device *d
3478 = hlist_entry(p, struct net_device, name_hlist);
3479 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3480 ret = -EEXIST;
3481 goto err_uninit;
3485 /* Fix illegal checksum combinations */
3486 if ((dev->features & NETIF_F_HW_CSUM) &&
3487 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3488 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3489 dev->name);
3490 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3493 if ((dev->features & NETIF_F_NO_CSUM) &&
3494 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3495 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3496 dev->name);
3497 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3501 /* Fix illegal SG+CSUM combinations. */
3502 if ((dev->features & NETIF_F_SG) &&
3503 !(dev->features & NETIF_F_ALL_CSUM)) {
3504 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
3505 dev->name);
3506 dev->features &= ~NETIF_F_SG;
3509 /* TSO requires that SG is present as well. */
3510 if ((dev->features & NETIF_F_TSO) &&
3511 !(dev->features & NETIF_F_SG)) {
3512 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
3513 dev->name);
3514 dev->features &= ~NETIF_F_TSO;
3516 if (dev->features & NETIF_F_UFO) {
3517 if (!(dev->features & NETIF_F_HW_CSUM)) {
3518 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3519 "NETIF_F_HW_CSUM feature.\n",
3520 dev->name);
3521 dev->features &= ~NETIF_F_UFO;
3523 if (!(dev->features & NETIF_F_SG)) {
3524 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3525 "NETIF_F_SG feature.\n",
3526 dev->name);
3527 dev->features &= ~NETIF_F_UFO;
3532 * nil rebuild_header routine
3533 * that should never be called; it is used just as a bug trap.
3536 if (!dev->rebuild_header)
3537 dev->rebuild_header = default_rebuild_header;
3539 ret = netdev_register_sysfs(dev);
3540 if (ret)
3541 goto err_uninit;
3542 dev->reg_state = NETREG_REGISTERED;
3545 * Default initial state at registry is that the
3546 * device is present.
3549 set_bit(__LINK_STATE_PRESENT, &dev->state);
3551 dev_init_scheduler(dev);
3552 write_lock_bh(&dev_base_lock);
3553 list_add_tail(&dev->dev_list, &net->dev_base_head);
3554 hlist_add_head(&dev->name_hlist, head);
3555 hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
3556 dev_hold(dev);
3557 write_unlock_bh(&dev_base_lock);
3559 /* Notify protocols that a new device appeared. */
3560 ret = raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
3561 ret = notifier_to_errno(ret);
3562 if (ret)
3563 unregister_netdevice(dev);
3565 out:
3566 return ret;
3568 err_uninit:
3569 if (dev->uninit)
3570 dev->uninit(dev);
3571 goto out;
3575 * register_netdev - register a network device
3576 * @dev: device to register
3578 * Take a completed network device structure and add it to the kernel
3579 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3580 * chain. 0 is returned on success. A negative errno code is returned
3581 * on a failure to set up the device, or if the name is a duplicate.
3583 * This is a wrapper around register_netdevice that takes the rtnl semaphore
3584 * and expands the device name if you passed a format string to
3585 * alloc_netdev.
3587 int register_netdev(struct net_device *dev)
3589 int err;
3591 rtnl_lock();
3594 * If the name is a format string the caller wants us to do a
3595 * name allocation.
3597 if (strchr(dev->name, '%')) {
3598 err = dev_alloc_name(dev, dev->name);
3599 if (err < 0)
3600 goto out;
3603 err = register_netdevice(dev);
3604 out:
3605 rtnl_unlock();
3606 return err;
3608 EXPORT_SYMBOL(register_netdev);
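/*
 * Editor's note: an illustrative sketch, not part of the original source, of
 * the usual driver life cycle around the registration calls above.  It
 * assumes alloc_etherdev() from <linux/etherdevice.h>; struct my_priv and
 * the my_open/my_xmit callbacks are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static struct net_device *my_probe(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));

	if (!dev)
		return NULL;
	dev->open = my_open;
	dev->hard_start_xmit = my_xmit;
	/* the "%d" in the name chosen by alloc_etherdev() is expanded here */
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void my_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes and releases the RTNL */
	free_netdev(dev);		/* frees once the last reference is gone */
}
#endif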
3611 * netdev_wait_allrefs - wait until all references are gone.
3613 * This is called when unregistering network devices.
3615 * Any protocol or device that holds a reference should register
3616 * for netdevice notification, and cleanup and put back the
3617 * reference if they receive an UNREGISTER event.
3618 * We can get stuck here if buggy protocols don't correctly
3619 * call dev_put.
3621 static void netdev_wait_allrefs(struct net_device *dev)
3623 unsigned long rebroadcast_time, warning_time;
3625 rebroadcast_time = warning_time = jiffies;
3626 while (atomic_read(&dev->refcnt) != 0) {
3627 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
3628 rtnl_lock();
3630 /* Rebroadcast unregister notification */
3631 raw_notifier_call_chain(&netdev_chain,
3632 NETDEV_UNREGISTER, dev);
3634 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3635 &dev->state)) {
3636 /* We must not have linkwatch events
3637 * pending on unregister. If this
3638 * happens, we simply run the queue
3639 * unscheduled, resulting in a noop
3640 * for this device.
3642 linkwatch_run_queue();
3645 __rtnl_unlock();
3647 rebroadcast_time = jiffies;
3650 msleep(250);
3652 if (time_after(jiffies, warning_time + 10 * HZ)) {
3653 printk(KERN_EMERG "unregister_netdevice: "
3654 "waiting for %s to become free. Usage "
3655 "count = %d\n",
3656 dev->name, atomic_read(&dev->refcnt));
3657 warning_time = jiffies;
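/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A protocol that pins a device with dev_hold() should register a netdevice
 * notifier and drop its reference on NETDEV_UNREGISTER, otherwise the wait
 * loop above will complain forever.  my_cached_dev is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static struct net_device *my_cached_dev;

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == my_cached_dev) {
		my_cached_dev = NULL;
		dev_put(dev);		/* let the refcount reach zero */
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_notifier = {
	.notifier_call	= my_netdev_event,
};

/* registered once with register_netdevice_notifier(&my_netdev_notifier) */
#endif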
3662 /* The sequence is:
3664 * rtnl_lock();
3665 * ...
3666 * register_netdevice(x1);
3667 * register_netdevice(x2);
3668 * ...
3669 * unregister_netdevice(y1);
3670 * unregister_netdevice(y2);
3671 * ...
3672 * rtnl_unlock();
3673 * free_netdev(y1);
3674 * free_netdev(y2);
3676 * We are invoked by rtnl_unlock() after it drops the semaphore.
3677 * This allows us to deal with problems:
3678 * 1) We can delete sysfs objects which invoke hotplug
3679 * without deadlocking with linkwatch via keventd.
3680 * 2) Since we run with the RTNL semaphore not held, we can sleep
3681 * safely in order to wait for the netdev refcnt to drop to zero.
3683 static DEFINE_MUTEX(net_todo_run_mutex);
3684 void netdev_run_todo(void)
3686 struct list_head list;
3688 /* Need to guard against multiple cpus getting out of order. */
3689 mutex_lock(&net_todo_run_mutex);
3691 /* Not safe to do outside the semaphore. We must not return
3692 * until all unregister events invoked by the local processor
3693 * have been completed (either by this todo run, or one on
3694 * another cpu).
3696 if (list_empty(&net_todo_list))
3697 goto out;
3699 /* Snapshot list, allow later requests */
3700 spin_lock(&net_todo_list_lock);
3701 list_replace_init(&net_todo_list, &list);
3702 spin_unlock(&net_todo_list_lock);
3704 while (!list_empty(&list)) {
3705 struct net_device *dev
3706 = list_entry(list.next, struct net_device, todo_list);
3707 list_del(&dev->todo_list);
3709 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
3710 printk(KERN_ERR "network todo '%s' but state %d\n",
3711 dev->name, dev->reg_state);
3712 dump_stack();
3713 continue;
3716 dev->reg_state = NETREG_UNREGISTERED;
3718 netdev_wait_allrefs(dev);
3720 /* paranoia */
3721 BUG_ON(atomic_read(&dev->refcnt));
3722 BUG_TRAP(!dev->ip_ptr);
3723 BUG_TRAP(!dev->ip6_ptr);
3724 BUG_TRAP(!dev->dn_ptr);
3726 if (dev->destructor)
3727 dev->destructor(dev);
3729 /* Free network device */
3730 kobject_put(&dev->dev.kobj);
3733 out:
3734 mutex_unlock(&net_todo_run_mutex);
3737 static struct net_device_stats *internal_stats(struct net_device *dev)
3739 return &dev->stats;
3743 * alloc_netdev_mq - allocate network device
3744 * @sizeof_priv: size of private data to allocate space for
3745 * @name: device name format string
3746 * @setup: callback to initialize device
3747 * @queue_count: the number of subqueues to allocate
3749 * Allocates a struct net_device with private data area for driver use
3750 * and performs basic initialization. Also allocates subqueue structs
3751 * for each queue on the device at the end of the netdevice.
3753 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3754 void (*setup)(struct net_device *), unsigned int queue_count)
3756 void *p;
3757 struct net_device *dev;
3758 int alloc_size;
3760 BUG_ON(strlen(name) >= sizeof(dev->name));
3762 /* ensure 32-byte alignment of both the device and private area */
3763 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
3764 (sizeof(struct net_device_subqueue) * (queue_count - 1))) &
3765 ~NETDEV_ALIGN_CONST;
3766 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3768 p = kzalloc(alloc_size, GFP_KERNEL);
3769 if (!p) {
3770 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
3771 return NULL;
3774 dev = (struct net_device *)
3775 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3776 dev->padded = (char *)dev - (char *)p;
3777 dev->nd_net = &init_net;
3779 if (sizeof_priv) {
3780 dev->priv = ((char *)dev +
3781 ((sizeof(struct net_device) +
3782 (sizeof(struct net_device_subqueue) *
3783 (queue_count - 1)) + NETDEV_ALIGN_CONST)
3784 & ~NETDEV_ALIGN_CONST));
3787 dev->egress_subqueue_count = queue_count;
3789 dev->get_stats = internal_stats;
3790 netpoll_netdev_init(dev);
3791 setup(dev);
3792 strcpy(dev->name, name);
3793 return dev;
3795 EXPORT_SYMBOL(alloc_netdev_mq);
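/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A multiqueue-capable Ethernet driver sizes its private area and TX
 * subqueues here; ether_setup() is the stock Ethernet initializer, while
 * struct my_priv, the "myeth%d" name and the four queues are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static struct net_device *my_alloc(void)
{
	return alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
			       ether_setup, 4);
}
#endif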
3798 * free_netdev - free network device
3799 * @dev: device
3801 * This function does the last stage of destroying an allocated device
3802 * interface. The reference to the device object is released.
3803 * If this is the last reference then it will be freed.
3805 void free_netdev(struct net_device *dev)
3807 #ifdef CONFIG_SYSFS
3808 /* Compatibility with error handling in drivers */
3809 if (dev->reg_state == NETREG_UNINITIALIZED) {
3810 kfree((char *)dev - dev->padded);
3811 return;
3814 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3815 dev->reg_state = NETREG_RELEASED;
3817 /* will free via device release */
3818 put_device(&dev->dev);
3819 #else
3820 kfree((char *)dev - dev->padded);
3821 #endif
3824 /* Synchronize with packet receive processing. */
3825 void synchronize_net(void)
3827 might_sleep();
3828 synchronize_rcu();
3832 * unregister_netdevice - remove device from the kernel
3833 * @dev: device
3835 * This function shuts down a device interface and removes it
3836 * from the kernel tables. On success 0 is returned, on a failure
3837 * a negative errno code is returned.
3839 * Callers must hold the rtnl semaphore. You may want
3840 * unregister_netdev() instead of this.
3843 void unregister_netdevice(struct net_device *dev)
3845 BUG_ON(dev_boot_phase);
3846 ASSERT_RTNL();
3848 /* Some devices call without registering for initialization unwind. */
3849 if (dev->reg_state == NETREG_UNINITIALIZED) {
3850 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3851 "was registered\n", dev->name, dev);
3853 WARN_ON(1);
3854 return;
3857 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3859 /* If device is running, close it first. */
3860 if (dev->flags & IFF_UP)
3861 dev_close(dev);
3863 /* And unlink it from device chain. */
3864 write_lock_bh(&dev_base_lock);
3865 list_del(&dev->dev_list);
3866 hlist_del(&dev->name_hlist);
3867 hlist_del(&dev->index_hlist);
3868 write_unlock_bh(&dev_base_lock);
3870 dev->reg_state = NETREG_UNREGISTERING;
3872 synchronize_net();
3874 /* Shutdown queueing discipline. */
3875 dev_shutdown(dev);
3878 /* Notify protocols that we are about to destroy
3879 this device. They should clean up all their state.
3881 raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3884 * Flush the unicast and multicast chains
3886 dev_addr_discard(dev);
3888 if (dev->uninit)
3889 dev->uninit(dev);
3891 /* Notifier chain MUST detach us from master device. */
3892 BUG_TRAP(!dev->master);
3894 /* Remove entries from sysfs */
3895 netdev_unregister_sysfs(dev);
3897 /* Finish processing unregister after unlock */
3898 net_set_todo(dev);
3900 synchronize_net();
3902 dev_put(dev);
3906 * unregister_netdev - remove device from the kernel
3907 * @dev: device
3909 * This function shuts down a device interface and removes it
3910 * from the kernel tables. On success 0 is returned, on a failure
3911 * a negative errno code is returned.
3913 * This is just a wrapper for unregister_netdevice that takes
3914 * the rtnl semaphore. In general you want to use this and not
3915 * unregister_netdevice.
3917 void unregister_netdev(struct net_device *dev)
3919 rtnl_lock();
3920 unregister_netdevice(dev);
3921 rtnl_unlock();
3924 EXPORT_SYMBOL(unregister_netdev);
3926 static int dev_cpu_callback(struct notifier_block *nfb,
3927 unsigned long action,
3928 void *ocpu)
3930 struct sk_buff **list_skb;
3931 struct net_device **list_net;
3932 struct sk_buff *skb;
3933 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3934 struct softnet_data *sd, *oldsd;
3936 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
3937 return NOTIFY_OK;
3939 local_irq_disable();
3940 cpu = smp_processor_id();
3941 sd = &per_cpu(softnet_data, cpu);
3942 oldsd = &per_cpu(softnet_data, oldcpu);
3944 /* Find end of our completion_queue. */
3945 list_skb = &sd->completion_queue;
3946 while (*list_skb)
3947 list_skb = &(*list_skb)->next;
3948 /* Append completion queue from offline CPU. */
3949 *list_skb = oldsd->completion_queue;
3950 oldsd->completion_queue = NULL;
3952 /* Find end of our output_queue. */
3953 list_net = &sd->output_queue;
3954 while (*list_net)
3955 list_net = &(*list_net)->next_sched;
3956 /* Append output queue from offline CPU. */
3957 *list_net = oldsd->output_queue;
3958 oldsd->output_queue = NULL;
3960 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3961 local_irq_enable();
3963 /* Process offline CPU's input_pkt_queue */
3964 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3965 netif_rx(skb);
3967 return NOTIFY_OK;
3970 #ifdef CONFIG_NET_DMA
3972 * net_dma_rebalance - try to maintain one DMA channel per CPU
3973 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
3975 * This is called when the number of channels allocated to the net_dma client
3976 * changes. The net_dma client tries to have one DMA channel per CPU.
3979 static void net_dma_rebalance(struct net_dma *net_dma)
3981 unsigned int cpu, i, n, chan_idx;
3982 struct dma_chan *chan;
3984 if (cpus_empty(net_dma->channel_mask)) {
3985 for_each_online_cpu(cpu)
3986 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
3987 return;
3990 i = 0;
3991 cpu = first_cpu(cpu_online_map);
3993 for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
3994 chan = net_dma->channels[chan_idx];
3996 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
3997 + (i < (num_online_cpus() %
3998 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4000 while(n) {
4001 per_cpu(softnet_data, cpu).net_dma = chan;
4002 cpu = next_cpu(cpu, cpu_online_map);
4003 n--;
4005 i++;
4010 * netdev_dma_event - event callback for the net_dma_client
4011 * @client: should always be net_dma_client
4012 * @chan: DMA channel for the event
4013 * @state: DMA state to be handled
4015 static enum dma_state_client
4016 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4017 enum dma_state state)
4019 int i, found = 0, pos = -1;
4020 struct net_dma *net_dma =
4021 container_of(client, struct net_dma, client);
4022 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4024 spin_lock(&net_dma->lock);
4025 switch (state) {
4026 case DMA_RESOURCE_AVAILABLE:
4027 for (i = 0; i < NR_CPUS; i++)
4028 if (net_dma->channels[i] == chan) {
4029 found = 1;
4030 break;
4031 } else if (net_dma->channels[i] == NULL && pos < 0)
4032 pos = i;
4034 if (!found && pos >= 0) {
4035 ack = DMA_ACK;
4036 net_dma->channels[pos] = chan;
4037 cpu_set(pos, net_dma->channel_mask);
4038 net_dma_rebalance(net_dma);
4040 break;
4041 case DMA_RESOURCE_REMOVED:
4042 for (i = 0; i < NR_CPUS; i++)
4043 if (net_dma->channels[i] == chan) {
4044 found = 1;
4045 pos = i;
4046 break;
4049 if (found) {
4050 ack = DMA_ACK;
4051 cpu_clear(pos, net_dma->channel_mask);
4052 net_dma->channels[i] = NULL;
4053 net_dma_rebalance(net_dma);
4055 break;
4056 default:
4057 break;
4059 spin_unlock(&net_dma->lock);
4061 return ack;
4065 * netdev_dma_register - register the networking subsystem as a DMA client
4067 static int __init netdev_dma_register(void)
4069 spin_lock_init(&net_dma.lock);
4070 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4071 dma_async_client_register(&net_dma.client);
4072 dma_async_client_chan_request(&net_dma.client);
4073 return 0;
4076 #else
4077 static int __init netdev_dma_register(void) { return -ENODEV; }
4078 #endif /* CONFIG_NET_DMA */
4081 * netdev_compute_features - compute conjunction of two feature sets
4082 * @all: first feature set
4083 * @one: second feature set
4085 * Computes a new feature set after adding a device with feature set
4086 * @one to the master device with current feature set @all. Returns
4087 * the new feature set.
4089 int netdev_compute_features(unsigned long all, unsigned long one)
4091 /* if device needs checksumming, downgrade to hw checksumming */
4092 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4093 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4095 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4096 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4097 all ^= NETIF_F_HW_CSUM
4098 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4100 if (one & NETIF_F_GSO)
4101 one |= NETIF_F_GSO_SOFTWARE;
4102 one |= NETIF_F_GSO;
4104 /* If even one device supports robust GSO, enable it for all. */
4105 if (one & NETIF_F_GSO_ROBUST)
4106 all |= NETIF_F_GSO_ROBUST;
4108 all &= one | NETIF_F_LLTX;
4110 if (!(all & NETIF_F_ALL_CSUM))
4111 all &= ~NETIF_F_SG;
4112 if (!(all & NETIF_F_SG))
4113 all &= ~NETIF_F_GSO_MASK;
4115 return all;
4117 EXPORT_SYMBOL(netdev_compute_features);
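/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A master device (bonding, bridging) folds its slaves' feature sets
 * together like this; struct my_slave, the starting mask and the list
 * iteration are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
struct my_slave {
	struct list_head	list;
	struct net_device	*dev;
};

static void my_recompute_features(struct net_device *master,
				  struct list_head *slaves)
{
	struct my_slave *s;
	unsigned long features = NETIF_F_SG | NETIF_F_HW_CSUM |
				 NETIF_F_GSO_MASK;

	list_for_each_entry(s, slaves, list)
		features = netdev_compute_features(features, s->dev->features);

	master->features = features;
}
#endif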
4119 /* Initialize per network namespace state */
4120 static int netdev_init(struct net *net)
4122 int i;
4123 INIT_LIST_HEAD(&net->dev_base_head);
4124 rwlock_init(&dev_base_lock);
4126 net->dev_name_head = kmalloc(
4127 sizeof(*net->dev_name_head)*NETDEV_HASHENTRIES, GFP_KERNEL);
4128 if (!net->dev_name_head)
4129 return -ENOMEM;
4131 net->dev_index_head = kmalloc(
4132 sizeof(*net->dev_index_head)*NETDEV_HASHENTRIES, GFP_KERNEL);
4133 if (!net->dev_index_head) {
4134 kfree(net->dev_name_head);
4135 return -ENOMEM;
4138 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4139 INIT_HLIST_HEAD(&net->dev_name_head[i]);
4141 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4142 INIT_HLIST_HEAD(&net->dev_index_head[i]);
4144 return 0;
4147 static void netdev_exit(struct net *net)
4149 kfree(net->dev_name_head);
4150 kfree(net->dev_index_head);
4153 static struct pernet_operations netdev_net_ops = {
4154 .init = netdev_init,
4155 .exit = netdev_exit,
4159 * Initialize the DEV module. At boot time this walks the device list and
4160 * unhooks any devices that fail to initialise (normally hardware not
4161 * present) and leaves us with a valid list of present and active devices.
4166 * This is called single threaded during boot, so no need
4167 * to take the rtnl semaphore.
4169 static int __init net_dev_init(void)
4171 int i, rc = -ENOMEM;
4173 BUG_ON(!dev_boot_phase);
4175 if (dev_proc_init())
4176 goto out;
4178 if (netdev_sysfs_init())
4179 goto out;
4181 INIT_LIST_HEAD(&ptype_all);
4182 for (i = 0; i < 16; i++)
4183 INIT_LIST_HEAD(&ptype_base[i]);
4185 if (register_pernet_subsys(&netdev_net_ops))
4186 goto out;
4189 * Initialise the packet receive queues.
4192 for_each_possible_cpu(i) {
4193 struct softnet_data *queue;
4195 queue = &per_cpu(softnet_data, i);
4196 skb_queue_head_init(&queue->input_pkt_queue);
4197 queue->completion_queue = NULL;
4198 INIT_LIST_HEAD(&queue->poll_list);
4200 queue->backlog.poll = process_backlog;
4201 queue->backlog.weight = weight_p;
4204 netdev_dma_register();
4206 dev_boot_phase = 0;
4208 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
4209 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
4211 hotcpu_notifier(dev_cpu_callback, 0);
4212 dst_init();
4213 dev_mcast_init();
4214 rc = 0;
4215 out:
4216 return rc;
4219 subsys_initcall(net_dev_init);
4221 EXPORT_SYMBOL(__dev_get_by_index);
4222 EXPORT_SYMBOL(__dev_get_by_name);
4223 EXPORT_SYMBOL(__dev_remove_pack);
4224 EXPORT_SYMBOL(dev_valid_name);
4225 EXPORT_SYMBOL(dev_add_pack);
4226 EXPORT_SYMBOL(dev_alloc_name);
4227 EXPORT_SYMBOL(dev_close);
4228 EXPORT_SYMBOL(dev_get_by_flags);
4229 EXPORT_SYMBOL(dev_get_by_index);
4230 EXPORT_SYMBOL(dev_get_by_name);
4231 EXPORT_SYMBOL(dev_open);
4232 EXPORT_SYMBOL(dev_queue_xmit);
4233 EXPORT_SYMBOL(dev_remove_pack);
4234 EXPORT_SYMBOL(dev_set_allmulti);
4235 EXPORT_SYMBOL(dev_set_promiscuity);
4236 EXPORT_SYMBOL(dev_change_flags);
4237 EXPORT_SYMBOL(dev_set_mtu);
4238 EXPORT_SYMBOL(dev_set_mac_address);
4239 EXPORT_SYMBOL(free_netdev);
4240 EXPORT_SYMBOL(netdev_boot_setup_check);
4241 EXPORT_SYMBOL(netdev_set_master);
4242 EXPORT_SYMBOL(netdev_state_change);
4243 EXPORT_SYMBOL(netif_receive_skb);
4244 EXPORT_SYMBOL(netif_rx);
4245 EXPORT_SYMBOL(register_gifconf);
4246 EXPORT_SYMBOL(register_netdevice);
4247 EXPORT_SYMBOL(register_netdevice_notifier);
4248 EXPORT_SYMBOL(skb_checksum_help);
4249 EXPORT_SYMBOL(synchronize_net);
4250 EXPORT_SYMBOL(unregister_netdevice);
4251 EXPORT_SYMBOL(unregister_netdevice_notifier);
4252 EXPORT_SYMBOL(net_enable_timestamp);
4253 EXPORT_SYMBOL(net_disable_timestamp);
4254 EXPORT_SYMBOL(dev_get_flags);
4256 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4257 EXPORT_SYMBOL(br_handle_frame_hook);
4258 EXPORT_SYMBOL(br_fdb_get_hook);
4259 EXPORT_SYMBOL(br_fdb_put_hook);
4260 #endif
4262 #ifdef CONFIG_KMOD
4263 EXPORT_SYMBOL(dev_load);
4264 #endif
4266 EXPORT_PER_CPU_SYMBOL(softnet_data);