/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		*channels[NR_CPUS];
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
		 enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev->nd_net;

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

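/*
 * Example (illustrative sketch only, not part of this file): a module
 * that wants to tap every received frame could register a handler such
 * as the hypothetical one below, and later remove it with
 * dev_remove_pack():
 *
 *	static int my_pack_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// consume our copy of the frame
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pack = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = my_pack_rcv,
 *	};
 *
 *	dev_add_pack(&my_pack);		// from module init
 *	dev_remove_pack(&my_pack);	// from module exit
 */
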
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */

static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

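/*
 * For example (illustrative only), a kernel command line of
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * would record irq 5 and I/O base 0x340 (mem_start/mem_end left 0) for
 * the interface that later probes as "eth0"; the driver then picks the
 * values up via netdev_boot_setup_check().
 */
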
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

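/*
 * Typical use (sketch, not part of this file) - the reference obtained
 * here must be dropped with dev_put() when the caller is done:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);
 *	}
 */
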
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev->nd_net);
	net = dev->nd_net;
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

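/*
 * Sketch of typical use (illustrative only): a driver wanting the next
 * free ethN name before registration can do
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * On success err holds the unit number that was assigned, while
 * dev->name holds the resulting name (e.g. "eth2").
 */
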
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev->nd_net);

	net = dev->nd_net;
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	device_rename(&dev->dev, dev->name);

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}

static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

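/*
 * Example (hypothetical sketch, not part of this file): a subsystem
 * interested in interfaces coming up could register a block like
 *
 *	static int my_netdev_event(struct notifier_block *this,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 *
 * and, thanks to the replay above, see NETDEV_REGISTER/NETDEV_UP for
 * devices that already existed at registration time.
 */
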
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

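/*
 * These two are commonly paired in driver power management (sketch,
 * assuming a typical driver's suspend/resume callbacks):
 *
 *	suspend:	netif_device_detach(dev);	// stop tx, mark gone
 *	resume:		netif_device_attach(dev);	// restart if running
 */
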
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
		       dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely((netif_queue_stopped(dev) ||
			      netif_subqueue_stopped(dev, skb)) &&
			      skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));

		if (!(dev->features & NETIF_F_GEN_CSUM) &&
		    !((dev->features & NETIF_F_IP_CSUM) &&
		      skb->protocol == htons(ETH_P_IP)) &&
		    !((dev->features & NETIF_F_IPV6_CSUM) &&
		      skb->protocol == htons(ETH_P_IPV6)))
			if (skb_checksum_help(skb))
				goto out_kfree_skb;
	}

gso:
	spin_lock_prefetch(&dev->queue_lock);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */

	q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&dev->queue_lock);
		q = dev->qdisc;
		if (q->enqueue) {
			/* reset queue_mapping to zero */
			skb_set_queue_mapping(skb, 0);
			rc = q->enqueue(skb, q);
			qdisc_run(dev);
			spin_unlock(&dev->queue_lock);

			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
			goto out;
		}
		spin_unlock(&dev->queue_lock);
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (dev->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, cpu);

			if (!netif_queue_stopped(dev) &&
			    !netif_subqueue_stopped(dev, skb)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev)) {
					HARD_TX_UNLOCK(dev);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

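/*
 * Sketch of a typical caller (illustrative only): a protocol that has
 * built an skb and filled in the headers simply does
 *
 *	skb->dev = dev;
 *	dev_queue_xmit(skb);
 *
 * and must not touch the skb afterwards, since it is consumed even on
 * error (see the comment above).
 */
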
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest
	 * when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

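/*
 * Typical driver receive path feeding this function (sketch, assuming
 * an Ethernet driver's interrupt handler has copied the frame into an
 * skb of pkt_len bytes):
 *
 *	skb_put(skb, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */
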
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);

static inline struct net_device *skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		if (skb_bond_should_drop(skb)) {
			kfree_skb(skb);
			return NULL;
		}
		skb->dev = dev->master;
	}

	return dev;
}

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;
	u32 ttl = G_TC_RTTL(skb->tc_verd);

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	spin_lock(&dev->ingress_lock);
	if ((q = dev->qdisc_ingress) != NULL)
		result = q->enqueue(skb, q);
	spin_unlock(&dev->ingress_lock);

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (!skb->dev->qdisc_ingress)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	int ret = NET_RX_DROP;
	__be16 type;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	orig_dev = skb_bond(skb);

	if (!orig_dev)
		return NET_RX_DROP;

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			local_irq_enable();
			break;
		}

		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);
	} while (++work < quota && jiffies == start_time);

	return work;
}

2141 * __napi_schedule - schedule for receive
2142 * @n: entry to schedule
2144 * The entry's receive function will be scheduled to run
2146 void __napi_schedule(struct napi_struct *n)
2148 unsigned long flags;
2150 local_irq_save(flags);
2151 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2152 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2153 local_irq_restore(flags);
2155 EXPORT_SYMBOL(__napi_schedule);
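/*
 * Editor's sketch, not part of the original file: how a driver feeds
 * the scheduling/poll machinery above. Assumes the netif_napi_add()/
 * netif_rx_schedule() helpers of this kernel generation; my_priv,
 * my_irq, my_poll, my_rx_ready() and my_rx_skb() are hypothetical.
 */
#if 0
struct my_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_priv *priv = data;

	/* Mask device RX interrupts here, then hand off to NAPI. */
	netif_rx_schedule(priv->dev, &priv->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work = 0;

	while (work < budget && my_rx_ready(priv)) {
		netif_receive_skb(my_rx_skb(priv));
		work++;
	}
	if (work < budget) {
		netif_rx_complete(priv->dev, napi);
		/* Unmask device RX interrupts here. */
	}
	return work;	/* net_rx_action() warns if work > weight */
}

/* At probe time: netif_napi_add(dev, &priv->napi, my_poll, 64); */
#endif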
2158 static void net_rx_action(struct softirq_action *h)
2160 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2161 unsigned long start_time = jiffies;
2162 int budget = netdev_budget;
2163 void *have;
2165 local_irq_disable();
2167 while (!list_empty(list)) {
2168 struct napi_struct *n;
2169 int work, weight;
2171 /* If softirq window is exhausted then punt.
2173 * Note that this is a slight policy change from the
2174 * previous NAPI code, which would allow up to 2
2175 * jiffies to pass before breaking out. The test
2176 * used to be "jiffies - start_time > 1".
2178 if (unlikely(budget <= 0 || jiffies != start_time))
2179 goto softnet_break;
2181 local_irq_enable();
2183 /* Even though interrupts have been re-enabled, this
2184 * access is safe because interrupts can only add new
2185 * entries to the tail of this list, and only ->poll()
2186 * calls can remove this head entry from the list.
2188 n = list_entry(list->next, struct napi_struct, poll_list);
2190 have = netpoll_poll_lock(n);
2192 weight = n->weight;
2194 /* This NAPI_STATE_SCHED test is for avoiding a race
2195 * with netpoll's poll_napi(). Only the entity which
2196 * obtains the lock and sees NAPI_STATE_SCHED set will
2197 * actually make the ->poll() call. Therefore we avoid
2198 * accidentally calling ->poll() when NAPI is not scheduled.
2200 work = 0;
2201 if (test_bit(NAPI_STATE_SCHED, &n->state))
2202 work = n->poll(n, weight);
2204 WARN_ON_ONCE(work > weight);
2206 budget -= work;
2208 local_irq_disable();
2210 /* Drivers must not modify the NAPI state if they
2211 * consume the entire weight. In such cases this code
2212 * still "owns" the NAPI instance and therefore can
2213 * move the instance around on the list at-will.
2215 if (unlikely(work == weight)) {
2216 if (unlikely(napi_disable_pending(n)))
2217 __napi_complete(n);
2218 else
2219 list_move_tail(&n->poll_list, list);
2222 netpoll_poll_unlock(have);
2224 out:
2225 local_irq_enable();
2227 #ifdef CONFIG_NET_DMA
2229 * There may not be any more sk_buffs coming right now, so push
2230 * any pending DMA copies to hardware
2232 if (!cpus_empty(net_dma.channel_mask)) {
2233 int chan_idx;
2234 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2235 struct dma_chan *chan = net_dma.channels[chan_idx];
2236 if (chan)
2237 dma_async_memcpy_issue_pending(chan);
2240 #endif
2242 return;
2244 softnet_break:
2245 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2246 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2247 goto out;
2250 static gifconf_func_t *gifconf_list[NPROTO];
2253 * register_gifconf - register a SIOCGIF handler
2254 * @family: Address family
2255 * @gifconf: Function handler
2257 * Register protocol dependent address dumping routines. The handler
2258 * that is passed must not be freed or reused until it has been replaced
2259 * by another handler.
2261 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2263 if (family >= NPROTO)
2264 return -EINVAL;
2265 gifconf_list[family] = gifconf;
2266 return 0;
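/*
 * Editor's sketch, not part of the original file: what a protocol
 * registers here. The handler mirrors the convention dev_ifconf()
 * relies on below; my_gifconf and PF_MYPROTO are hypothetical.
 */
#if 0
static int my_gifconf(struct net_device *dev, char __user *buf, int len)
{
	/* When buf is NULL, return the space a dump for dev would need;
	 * otherwise write at most len bytes and return the amount
	 * actually written (negative on fault). */
	return 0;
}

static int __init my_proto_init(void)
{
	return register_gifconf(PF_MYPROTO, my_gifconf);
}
#endif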
2271 * Map an interface index to its name (SIOCGIFNAME)
2275 * We need this ioctl for efficient implementation of the
2276 * if_indextoname() function required by the IPv6 API. Without
2277 * it, we would have to search all the interfaces to find a
2278 * match. --pb
2281 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2283 struct net_device *dev;
2284 struct ifreq ifr;
2287 * Fetch the caller's info block.
2290 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2291 return -EFAULT;
2293 read_lock(&dev_base_lock);
2294 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2295 if (!dev) {
2296 read_unlock(&dev_base_lock);
2297 return -ENODEV;
2300 strcpy(ifr.ifr_name, dev->name);
2301 read_unlock(&dev_base_lock);
2303 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2304 return -EFAULT;
2305 return 0;
2309 * Perform a SIOCGIFCONF call. This structure will change
2310 * size eventually, and there is nothing I can do about it.
2311 * Thus we will need a 'compatibility mode'.
2314 static int dev_ifconf(struct net *net, char __user *arg)
2316 struct ifconf ifc;
2317 struct net_device *dev;
2318 char __user *pos;
2319 int len;
2320 int total;
2321 int i;
2324 * Fetch the caller's info block.
2327 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2328 return -EFAULT;
2330 pos = ifc.ifc_buf;
2331 len = ifc.ifc_len;
2334 * Loop over the interfaces, and write an info block for each.
2337 total = 0;
2338 for_each_netdev(net, dev) {
2339 for (i = 0; i < NPROTO; i++) {
2340 if (gifconf_list[i]) {
2341 int done;
2342 if (!pos)
2343 done = gifconf_list[i](dev, NULL, 0);
2344 else
2345 done = gifconf_list[i](dev, pos + total,
2346 len - total);
2347 if (done < 0)
2348 return -EFAULT;
2349 total += done;
2355 * All done. Write the updated control block back to the caller.
2357 ifc.ifc_len = total;
2360 * Both BSD and Solaris return 0 here, so we do too.
2362 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
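/*
 * Editor's sketch, not part of the original file: the userspace side
 * of SIOCGIFCONF, which ends up in dev_ifconf() above. A fixed-size
 * buffer is used for brevity; real callers retry with a larger one
 * when ifc_len comes back full.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static int list_interfaces(void)
{
	struct ifreq reqs[32];
	struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, n;

	if (fd < 0)
		return -1;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
		close(fd);
		return -1;
	}
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", reqs[i].ifr_name);
	close(fd);
	return 0;
}
#endif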
2365 #ifdef CONFIG_PROC_FS
2367 * This is invoked by the /proc filesystem handler to display a device
2368 * in detail.
2370 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2371 __acquires(dev_base_lock)
2373 struct net *net = seq_file_net(seq);
2374 loff_t off;
2375 struct net_device *dev;
2377 read_lock(&dev_base_lock);
2378 if (!*pos)
2379 return SEQ_START_TOKEN;
2381 off = 1;
2382 for_each_netdev(net, dev)
2383 if (off++ == *pos)
2384 return dev;
2386 return NULL;
2389 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2391 struct net *net = seq_file_net(seq);
2392 ++*pos;
2393 return v == SEQ_START_TOKEN ?
2394 first_net_device(net) : next_net_device((struct net_device *)v);
2397 void dev_seq_stop(struct seq_file *seq, void *v)
2398 __releases(dev_base_lock)
2400 read_unlock(&dev_base_lock);
2403 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2405 struct net_device_stats *stats = dev->get_stats(dev);
2407 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2408 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2409 dev->name, stats->rx_bytes, stats->rx_packets,
2410 stats->rx_errors,
2411 stats->rx_dropped + stats->rx_missed_errors,
2412 stats->rx_fifo_errors,
2413 stats->rx_length_errors + stats->rx_over_errors +
2414 stats->rx_crc_errors + stats->rx_frame_errors,
2415 stats->rx_compressed, stats->multicast,
2416 stats->tx_bytes, stats->tx_packets,
2417 stats->tx_errors, stats->tx_dropped,
2418 stats->tx_fifo_errors, stats->collisions,
2419 stats->tx_carrier_errors +
2420 stats->tx_aborted_errors +
2421 stats->tx_window_errors +
2422 stats->tx_heartbeat_errors,
2423 stats->tx_compressed);
2427 * Called from the PROCfs module. This now uses the new arbitrary sized
2428 * /proc/net interface to create /proc/net/dev
2430 static int dev_seq_show(struct seq_file *seq, void *v)
2432 if (v == SEQ_START_TOKEN)
2433 seq_puts(seq, "Inter-| Receive "
2434 " | Transmit\n"
2435 " face |bytes packets errs drop fifo frame "
2436 "compressed multicast|bytes packets errs "
2437 "drop fifo colls carrier compressed\n");
2438 else
2439 dev_seq_printf_stats(seq, v);
2440 return 0;
2443 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2445 struct netif_rx_stats *rc = NULL;
2447 while (*pos < NR_CPUS)
2448 if (cpu_online(*pos)) {
2449 rc = &per_cpu(netdev_rx_stat, *pos);
2450 break;
2451 } else
2452 ++*pos;
2453 return rc;
2456 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2458 return softnet_get_online(pos);
2461 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2463 ++*pos;
2464 return softnet_get_online(pos);
2467 static void softnet_seq_stop(struct seq_file *seq, void *v)
2471 static int softnet_seq_show(struct seq_file *seq, void *v)
2473 struct netif_rx_stats *s = v;
2475 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2476 s->total, s->dropped, s->time_squeeze, 0,
2477 0, 0, 0, 0, /* was fastroute */
2478 s->cpu_collision);
2479 return 0;
2482 static const struct seq_operations dev_seq_ops = {
2483 .start = dev_seq_start,
2484 .next = dev_seq_next,
2485 .stop = dev_seq_stop,
2486 .show = dev_seq_show,
2489 static int dev_seq_open(struct inode *inode, struct file *file)
2491 return seq_open_net(inode, file, &dev_seq_ops,
2492 sizeof(struct seq_net_private));
2495 static const struct file_operations dev_seq_fops = {
2496 .owner = THIS_MODULE,
2497 .open = dev_seq_open,
2498 .read = seq_read,
2499 .llseek = seq_lseek,
2500 .release = seq_release_net,
2503 static const struct seq_operations softnet_seq_ops = {
2504 .start = softnet_seq_start,
2505 .next = softnet_seq_next,
2506 .stop = softnet_seq_stop,
2507 .show = softnet_seq_show,
2510 static int softnet_seq_open(struct inode *inode, struct file *file)
2512 return seq_open(file, &softnet_seq_ops);
2515 static const struct file_operations softnet_seq_fops = {
2516 .owner = THIS_MODULE,
2517 .open = softnet_seq_open,
2518 .read = seq_read,
2519 .llseek = seq_lseek,
2520 .release = seq_release,
2523 static void *ptype_get_idx(loff_t pos)
2525 struct packet_type *pt = NULL;
2526 loff_t i = 0;
2527 int t;
2529 list_for_each_entry_rcu(pt, &ptype_all, list) {
2530 if (i == pos)
2531 return pt;
2532 ++i;
2535 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2536 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2537 if (i == pos)
2538 return pt;
2539 ++i;
2542 return NULL;
2545 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2546 __acquires(RCU)
2548 rcu_read_lock();
2549 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2552 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2554 struct packet_type *pt;
2555 struct list_head *nxt;
2556 int hash;
2558 ++*pos;
2559 if (v == SEQ_START_TOKEN)
2560 return ptype_get_idx(0);
2562 pt = v;
2563 nxt = pt->list.next;
2564 if (pt->type == htons(ETH_P_ALL)) {
2565 if (nxt != &ptype_all)
2566 goto found;
2567 hash = 0;
2568 nxt = ptype_base[0].next;
2569 } else
2570 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2572 while (nxt == &ptype_base[hash]) {
2573 if (++hash >= PTYPE_HASH_SIZE)
2574 return NULL;
2575 nxt = ptype_base[hash].next;
2577 found:
2578 return list_entry(nxt, struct packet_type, list);
2581 static void ptype_seq_stop(struct seq_file *seq, void *v)
2582 __releases(RCU)
2584 rcu_read_unlock();
2587 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2589 #ifdef CONFIG_KALLSYMS
2590 unsigned long offset = 0, symsize;
2591 const char *symname;
2592 char *modname;
2593 char namebuf[128];
2595 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2596 &modname, namebuf);
2598 if (symname) {
2599 char *delim = ":";
2601 if (!modname)
2602 modname = delim = "";
2603 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2604 symname, offset);
2605 return;
2607 #endif
2609 seq_printf(seq, "[%p]", sym);
2612 static int ptype_seq_show(struct seq_file *seq, void *v)
2614 struct packet_type *pt = v;
2616 if (v == SEQ_START_TOKEN)
2617 seq_puts(seq, "Type Device Function\n");
2618 else {
2619 if (pt->type == htons(ETH_P_ALL))
2620 seq_puts(seq, "ALL ");
2621 else
2622 seq_printf(seq, "%04x", ntohs(pt->type));
2624 seq_printf(seq, " %-8s ",
2625 pt->dev ? pt->dev->name : "");
2626 ptype_seq_decode(seq, pt->func);
2627 seq_putc(seq, '\n');
2630 return 0;
2633 static const struct seq_operations ptype_seq_ops = {
2634 .start = ptype_seq_start,
2635 .next = ptype_seq_next,
2636 .stop = ptype_seq_stop,
2637 .show = ptype_seq_show,
2640 static int ptype_seq_open(struct inode *inode, struct file *file)
2642 return seq_open(file, &ptype_seq_ops);
2645 static const struct file_operations ptype_seq_fops = {
2646 .owner = THIS_MODULE,
2647 .open = ptype_seq_open,
2648 .read = seq_read,
2649 .llseek = seq_lseek,
2650 .release = seq_release,
2654 static int __net_init dev_proc_net_init(struct net *net)
2656 int rc = -ENOMEM;
2658 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2659 goto out;
2660 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2661 goto out_dev;
2662 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2663 goto out_softnet;
2665 if (wext_proc_init(net))
2666 goto out_ptype;
2667 rc = 0;
2668 out:
2669 return rc;
2670 out_ptype:
2671 proc_net_remove(net, "ptype");
2672 out_softnet:
2673 proc_net_remove(net, "softnet_stat");
2674 out_dev:
2675 proc_net_remove(net, "dev");
2676 goto out;
2679 static void __net_exit dev_proc_net_exit(struct net *net)
2681 wext_proc_exit(net);
2683 proc_net_remove(net, "ptype");
2684 proc_net_remove(net, "softnet_stat");
2685 proc_net_remove(net, "dev");
2688 static struct pernet_operations __net_initdata dev_proc_ops = {
2689 .init = dev_proc_net_init,
2690 .exit = dev_proc_net_exit,
2693 static int __init dev_proc_init(void)
2695 return register_pernet_subsys(&dev_proc_ops);
2697 #else
2698 #define dev_proc_init() 0
2699 #endif /* CONFIG_PROC_FS */
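/*
 * Editor's sketch, not part of the original file: the shape shared by
 * the three /proc/net files above, reduced to a single_open() file;
 * "example" and the example_* names are hypothetical.
 */
#if 0
static int example_seq_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_seq_show, NULL);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* In a pernet init: proc_net_fops_create(net, "example", S_IRUGO,
 * &example_fops); removed with proc_net_remove(net, "example"). */
#endif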
2703 * netdev_set_master - set up master/slave pair
2704 * @slave: slave device
2705 * @master: new master device
2707 * Changes the master device of the slave. Pass %NULL to break the
2708 * bonding. The caller must hold the RTNL semaphore. On a failure
2709 * a negative errno code is returned. On success the reference counts
2710 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2711 * function returns zero.
2713 int netdev_set_master(struct net_device *slave, struct net_device *master)
2715 struct net_device *old = slave->master;
2717 ASSERT_RTNL();
2719 if (master) {
2720 if (old)
2721 return -EBUSY;
2722 dev_hold(master);
2725 slave->master = master;
2727 synchronize_net();
2729 if (old)
2730 dev_put(old);
2732 if (master)
2733 slave->flags |= IFF_SLAVE;
2734 else
2735 slave->flags &= ~IFF_SLAVE;
2737 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2738 return 0;
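/*
 * Editor's sketch, not part of the original file: a bonding-style
 * enslave path built on netdev_set_master(); my_enslave is a
 * hypothetical name.
 */
#if 0
static int my_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	ASSERT_RTNL();			/* the RTNL must already be held */
	err = netdev_set_master(slave, master);
	if (err)
		return err;		/* -EBUSY if already enslaved */
	/* ... inherit flags, sync addresses, start monitoring ... */
	return 0;
}

/* Teardown is the mirror image: netdev_set_master(slave, NULL); */
#endif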
2741 static void __dev_set_promiscuity(struct net_device *dev, int inc)
2743 unsigned short old_flags = dev->flags;
2745 ASSERT_RTNL();
2747 if ((dev->promiscuity += inc) == 0)
2748 dev->flags &= ~IFF_PROMISC;
2749 else
2750 dev->flags |= IFF_PROMISC;
2751 if (dev->flags != old_flags) {
2752 printk(KERN_INFO "device %s %s promiscuous mode\n",
2753 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2754 "left");
2755 if (audit_enabled)
2756 audit_log(current->audit_context, GFP_ATOMIC,
2757 AUDIT_ANOM_PROMISCUOUS,
2758 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2759 dev->name, (dev->flags & IFF_PROMISC),
2760 (old_flags & IFF_PROMISC),
2761 audit_get_loginuid(current),
2762 current->uid, current->gid,
2763 audit_get_sessionid(current));
2765 if (dev->change_rx_flags)
2766 dev->change_rx_flags(dev, IFF_PROMISC);
2771 * dev_set_promiscuity - update promiscuity count on a device
2772 * @dev: device
2773 * @inc: modifier
2775 * Add or remove promiscuity from a device. While the count in the device
2776 * remains above zero the interface remains promiscuous. Once it hits zero
2777 * the device reverts back to normal filtering operation. A negative inc
2778 * value is used to drop promiscuity on the device.
2780 void dev_set_promiscuity(struct net_device *dev, int inc)
2782 unsigned short old_flags = dev->flags;
2784 __dev_set_promiscuity(dev, inc);
2785 if (dev->flags != old_flags)
2786 dev_set_rx_mode(dev);
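/*
 * Editor's sketch, not part of the original file: the counter makes
 * promiscuity reference-counted, so users must pair every +1 with a
 * later -1; my_tap_attach/my_tap_detach are hypothetical.
 */
#if 0
static void my_tap_attach(struct net_device *dev)
{
	rtnl_lock();		/* __dev_set_promiscuity() asserts this */
	dev_set_promiscuity(dev, 1);
	rtnl_unlock();
}

static void my_tap_detach(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}
#endif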
2790 * dev_set_allmulti - update allmulti count on a device
2791 * @dev: device
2792 * @inc: modifier
2794 * Add or remove reception of all multicast frames to a device. While the
2795 * count in the device remains above zero the interface remains listening
2796 * to all multicast frames. Once it hits zero the device reverts back to normal
2797 * filtering operation. A negative @inc value is used to drop the counter
2798 * when releasing a resource needing all multicasts.
2801 void dev_set_allmulti(struct net_device *dev, int inc)
2803 unsigned short old_flags = dev->flags;
2805 ASSERT_RTNL();
2807 dev->flags |= IFF_ALLMULTI;
2808 if ((dev->allmulti += inc) == 0)
2809 dev->flags &= ~IFF_ALLMULTI;
2810 if (dev->flags ^ old_flags) {
2811 if (dev->change_rx_flags)
2812 dev->change_rx_flags(dev, IFF_ALLMULTI);
2813 dev_set_rx_mode(dev);
2818 * Upload unicast and multicast address lists to device and
2819 * configure RX filtering. When the device doesn't support unicast
2820 * filtering it is put in promiscuous mode while unicast addresses
2821 * are present.
2823 void __dev_set_rx_mode(struct net_device *dev)
2825 /* dev_open will call this function so the list will stay sane. */
2826 if (!(dev->flags&IFF_UP))
2827 return;
2829 if (!netif_device_present(dev))
2830 return;
2832 if (dev->set_rx_mode)
2833 dev->set_rx_mode(dev);
2834 else {
2835 /* Unicast addresses changes may only happen under the rtnl,
2836 * therefore calling __dev_set_promiscuity here is safe.
2838 if (dev->uc_count > 0 && !dev->uc_promisc) {
2839 __dev_set_promiscuity(dev, 1);
2840 dev->uc_promisc = 1;
2841 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2842 __dev_set_promiscuity(dev, -1);
2843 dev->uc_promisc = 0;
2846 if (dev->set_multicast_list)
2847 dev->set_multicast_list(dev);
2851 void dev_set_rx_mode(struct net_device *dev)
2853 netif_tx_lock_bh(dev);
2854 __dev_set_rx_mode(dev);
2855 netif_tx_unlock_bh(dev);
2858 int __dev_addr_delete(struct dev_addr_list **list, int *count,
2859 void *addr, int alen, int glbl)
2861 struct dev_addr_list *da;
2863 for (; (da = *list) != NULL; list = &da->next) {
2864 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2865 alen == da->da_addrlen) {
2866 if (glbl) {
2867 int old_glbl = da->da_gusers;
2868 da->da_gusers = 0;
2869 if (old_glbl == 0)
2870 break;
2872 if (--da->da_users)
2873 return 0;
2875 *list = da->next;
2876 kfree(da);
2877 (*count)--;
2878 return 0;
2881 return -ENOENT;
2884 int __dev_addr_add(struct dev_addr_list **list, int *count,
2885 void *addr, int alen, int glbl)
2887 struct dev_addr_list *da;
2889 for (da = *list; da != NULL; da = da->next) {
2890 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2891 da->da_addrlen == alen) {
2892 if (glbl) {
2893 int old_glbl = da->da_gusers;
2894 da->da_gusers = 1;
2895 if (old_glbl)
2896 return 0;
2898 da->da_users++;
2899 return 0;
2903 da = kzalloc(sizeof(*da), GFP_ATOMIC);
2904 if (da == NULL)
2905 return -ENOMEM;
2906 memcpy(da->da_addr, addr, alen);
2907 da->da_addrlen = alen;
2908 da->da_users = 1;
2909 da->da_gusers = glbl ? 1 : 0;
2910 da->next = *list;
2911 *list = da;
2912 (*count)++;
2913 return 0;
2917 * dev_unicast_delete - Release secondary unicast address.
2918 * @dev: device
2919 * @addr: address to delete
2920 * @alen: length of @addr
2922 * Release reference to a secondary unicast address and remove it
2923 * from the device if the reference count drops to zero.
2925 * The caller must hold the rtnl_mutex.
2927 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2929 int err;
2931 ASSERT_RTNL();
2933 netif_tx_lock_bh(dev);
2934 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2935 if (!err)
2936 __dev_set_rx_mode(dev);
2937 netif_tx_unlock_bh(dev);
2938 return err;
2940 EXPORT_SYMBOL(dev_unicast_delete);
2943 * dev_unicast_add - add a secondary unicast address
2944 * @dev: device
2945 * @addr: address to add
2946 * @alen: length of @addr
2948 * Add a secondary unicast address to the device or increase
2949 * the reference count if it already exists.
2951 * The caller must hold the rtnl_mutex.
2953 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2955 int err;
2957 ASSERT_RTNL();
2959 netif_tx_lock_bh(dev);
2960 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2961 if (!err)
2962 __dev_set_rx_mode(dev);
2963 netif_tx_unlock_bh(dev);
2964 return err;
2966 EXPORT_SYMBOL(dev_unicast_add);
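/*
 * Editor's sketch, not part of the original file: claiming and later
 * releasing a secondary unicast address; the address and function
 * names are hypothetical.
 */
#if 0
static int my_claim_addr(struct net_device *dev)
{
	static const u8 mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	int err;

	rtnl_lock();		/* both helpers assert the RTNL */
	err = dev_unicast_add(dev, (void *)mac, ETH_ALEN);
	rtnl_unlock();
	return err;
}

/* Release later, again under the RTNL:
 * dev_unicast_delete(dev, (void *)mac, ETH_ALEN); */
#endif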
2968 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
2969 struct dev_addr_list **from, int *from_count)
2971 struct dev_addr_list *da, *next;
2972 int err = 0;
2974 da = *from;
2975 while (da != NULL) {
2976 next = da->next;
2977 if (!da->da_synced) {
2978 err = __dev_addr_add(to, to_count,
2979 da->da_addr, da->da_addrlen, 0);
2980 if (err < 0)
2981 break;
2982 da->da_synced = 1;
2983 da->da_users++;
2984 } else if (da->da_users == 1) {
2985 __dev_addr_delete(to, to_count,
2986 da->da_addr, da->da_addrlen, 0);
2987 __dev_addr_delete(from, from_count,
2988 da->da_addr, da->da_addrlen, 0);
2990 da = next;
2992 return err;
2995 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
2996 struct dev_addr_list **from, int *from_count)
2998 struct dev_addr_list *da, *next;
3000 da = *from;
3001 while (da != NULL) {
3002 next = da->next;
3003 if (da->da_synced) {
3004 __dev_addr_delete(to, to_count,
3005 da->da_addr, da->da_addrlen, 0);
3006 da->da_synced = 0;
3007 __dev_addr_delete(from, from_count,
3008 da->da_addr, da->da_addrlen, 0);
3010 da = next;
3015 * dev_unicast_sync - Synchronize device's unicast list to another device
3016 * @to: destination device
3017 * @from: source device
3019 * Add newly added addresses to the destination device and release
3020 * addresses that have no users left. The source device must be
3021 * locked by netif_tx_lock_bh.
3023 * This function is intended to be called from the dev->set_rx_mode
3024 * function of layered software devices.
3026 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3028 int err = 0;
3030 netif_tx_lock_bh(to);
3031 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3032 &from->uc_list, &from->uc_count);
3033 if (!err)
3034 __dev_set_rx_mode(to);
3035 netif_tx_unlock_bh(to);
3036 return err;
3038 EXPORT_SYMBOL(dev_unicast_sync);
3041 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3042 * @to: destination device
3043 * @from: source device
3045 * Remove all addresses that were added to the destination device by
3046 * dev_unicast_sync(). This function is intended to be called from the
3047 * dev->stop function of layered software devices.
3049 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3051 netif_tx_lock_bh(from);
3052 netif_tx_lock_bh(to);
3054 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3055 &from->uc_list, &from->uc_count);
3056 __dev_set_rx_mode(to);
3058 netif_tx_unlock_bh(to);
3059 netif_tx_unlock_bh(from);
3061 EXPORT_SYMBOL(dev_unicast_unsync);
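/*
 * Editor's sketch, not part of the original file: the layered-device
 * usage the comments above describe. my_get_lower() and the
 * upper/lower names are hypothetical.
 */
#if 0
static void my_upper_set_rx_mode(struct net_device *upper)
{
	struct net_device *lower = my_get_lower(upper);

	/* We are called via dev_set_rx_mode(), so upper's TX lock is
	 * already held, as dev_unicast_sync() requires of the source. */
	dev_unicast_sync(lower, upper);
}

/* And in the upper device's ->stop():
 * dev_unicast_unsync(lower, upper); */
#endif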
3063 static void __dev_addr_discard(struct dev_addr_list **list)
3065 struct dev_addr_list *tmp;
3067 while (*list != NULL) {
3068 tmp = *list;
3069 *list = tmp->next;
3070 if (tmp->da_users > tmp->da_gusers)
3071 printk(KERN_ERR "__dev_addr_discard: address leakage! "
3072 "da_users=%d\n", tmp->da_users);
3073 kfree(tmp);
3077 static void dev_addr_discard(struct net_device *dev)
3079 netif_tx_lock_bh(dev);
3081 __dev_addr_discard(&dev->uc_list);
3082 dev->uc_count = 0;
3084 __dev_addr_discard(&dev->mc_list);
3085 dev->mc_count = 0;
3087 netif_tx_unlock_bh(dev);
3090 unsigned dev_get_flags(const struct net_device *dev)
3092 unsigned flags;
3094 flags = (dev->flags & ~(IFF_PROMISC |
3095 IFF_ALLMULTI |
3096 IFF_RUNNING |
3097 IFF_LOWER_UP |
3098 IFF_DORMANT)) |
3099 (dev->gflags & (IFF_PROMISC |
3100 IFF_ALLMULTI));
3102 if (netif_running(dev)) {
3103 if (netif_oper_up(dev))
3104 flags |= IFF_RUNNING;
3105 if (netif_carrier_ok(dev))
3106 flags |= IFF_LOWER_UP;
3107 if (netif_dormant(dev))
3108 flags |= IFF_DORMANT;
3111 return flags;
3114 int dev_change_flags(struct net_device *dev, unsigned flags)
3116 int ret, changes;
3117 int old_flags = dev->flags;
3119 ASSERT_RTNL();
3122 * Set the flags on our device.
3125 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3126 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3127 IFF_AUTOMEDIA)) |
3128 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3129 IFF_ALLMULTI));
3132 * Load in the correct multicast list now the flags have changed.
3135 if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
3136 dev->change_rx_flags(dev, IFF_MULTICAST);
3138 dev_set_rx_mode(dev);
3141 * Have we downed the interface. We handle IFF_UP ourselves
3142 * according to user attempts to set it, rather than blindly
3143 * setting it.
3146 ret = 0;
3147 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3148 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3150 if (!ret)
3151 dev_set_rx_mode(dev);
3154 if (dev->flags & IFF_UP &&
3155 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3156 IFF_VOLATILE)))
3157 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3159 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3160 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3161 dev->gflags ^= IFF_PROMISC;
3162 dev_set_promiscuity(dev, inc);
3165 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3166 is important. Some (broken) drivers set IFF_PROMISC when
3167 IFF_ALLMULTI is requested, without asking us and without reporting it.
3169 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3170 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3171 dev->gflags ^= IFF_ALLMULTI;
3172 dev_set_allmulti(dev, inc);
3175 /* Exclude state transition flags, already notified */
3176 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3177 if (changes)
3178 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3180 return ret;
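/*
 * Editor's sketch, not part of the original file: toggling IFF_UP from
 * kernel code goes through dev_change_flags() so that dev_open()/
 * dev_close() and the notifiers fire; my_bring_up is hypothetical.
 */
#if 0
static int my_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif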
3183 int dev_set_mtu(struct net_device *dev, int new_mtu)
3185 int err;
3187 if (new_mtu == dev->mtu)
3188 return 0;
3190 /* MTU must not be negative. */
3191 if (new_mtu < 0)
3192 return -EINVAL;
3194 if (!netif_device_present(dev))
3195 return -ENODEV;
3197 err = 0;
3198 if (dev->change_mtu)
3199 err = dev->change_mtu(dev, new_mtu);
3200 else
3201 dev->mtu = new_mtu;
3202 if (!err && dev->flags & IFF_UP)
3203 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3204 return err;
3207 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3209 int err;
3211 if (!dev->set_mac_address)
3212 return -EOPNOTSUPP;
3213 if (sa->sa_family != dev->type)
3214 return -EINVAL;
3215 if (!netif_device_present(dev))
3216 return -ENODEV;
3217 err = dev->set_mac_address(dev, sa);
3218 if (!err)
3219 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3220 return err;
3224 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3226 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3228 int err;
3229 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3231 if (!dev)
3232 return -ENODEV;
3234 switch (cmd) {
3235 case SIOCGIFFLAGS: /* Get interface flags */
3236 ifr->ifr_flags = dev_get_flags(dev);
3237 return 0;
3239 case SIOCGIFMETRIC: /* Get the metric on the interface
3240 (currently unused) */
3241 ifr->ifr_metric = 0;
3242 return 0;
3244 case SIOCGIFMTU: /* Get the MTU of a device */
3245 ifr->ifr_mtu = dev->mtu;
3246 return 0;
3248 case SIOCGIFHWADDR:
3249 if (!dev->addr_len)
3250 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3251 else
3252 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3253 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3254 ifr->ifr_hwaddr.sa_family = dev->type;
3255 return 0;
3257 case SIOCGIFSLAVE:
3258 err = -EINVAL;
3259 break;
3261 case SIOCGIFMAP:
3262 ifr->ifr_map.mem_start = dev->mem_start;
3263 ifr->ifr_map.mem_end = dev->mem_end;
3264 ifr->ifr_map.base_addr = dev->base_addr;
3265 ifr->ifr_map.irq = dev->irq;
3266 ifr->ifr_map.dma = dev->dma;
3267 ifr->ifr_map.port = dev->if_port;
3268 return 0;
3270 case SIOCGIFINDEX:
3271 ifr->ifr_ifindex = dev->ifindex;
3272 return 0;
3274 case SIOCGIFTXQLEN:
3275 ifr->ifr_qlen = dev->tx_queue_len;
3276 return 0;
3278 default:
3279 /* dev_ioctl() should ensure this case
3280 * is never reached
3282 WARN_ON(1);
3283 err = -EINVAL;
3284 break;
3287 return err;
3291 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3293 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3295 int err;
3296 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3298 if (!dev)
3299 return -ENODEV;
3301 switch (cmd) {
3302 case SIOCSIFFLAGS: /* Set interface flags */
3303 return dev_change_flags(dev, ifr->ifr_flags);
3305 case SIOCSIFMETRIC: /* Set the metric on the interface
3306 (currently unused) */
3307 return -EOPNOTSUPP;
3309 case SIOCSIFMTU: /* Set the MTU of a device */
3310 return dev_set_mtu(dev, ifr->ifr_mtu);
3312 case SIOCSIFHWADDR:
3313 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3315 case SIOCSIFHWBROADCAST:
3316 if (ifr->ifr_hwaddr.sa_family != dev->type)
3317 return -EINVAL;
3318 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3319 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3320 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3321 return 0;
3323 case SIOCSIFMAP:
3324 if (dev->set_config) {
3325 if (!netif_device_present(dev))
3326 return -ENODEV;
3327 return dev->set_config(dev, &ifr->ifr_map);
3329 return -EOPNOTSUPP;
3331 case SIOCADDMULTI:
3332 if (!dev->set_multicast_list ||
3333 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3334 return -EINVAL;
3335 if (!netif_device_present(dev))
3336 return -ENODEV;
3337 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3338 dev->addr_len, 1);
3340 case SIOCDELMULTI:
3341 if (!dev->set_multicast_list ||
3342 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3343 return -EINVAL;
3344 if (!netif_device_present(dev))
3345 return -ENODEV;
3346 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3347 dev->addr_len, 1);
3349 case SIOCSIFTXQLEN:
3350 if (ifr->ifr_qlen < 0)
3351 return -EINVAL;
3352 dev->tx_queue_len = ifr->ifr_qlen;
3353 return 0;
3355 case SIOCSIFNAME:
3356 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3357 return dev_change_name(dev, ifr->ifr_newname);
3360 * Unknown or private ioctl
3363 default:
3364 if ((cmd >= SIOCDEVPRIVATE &&
3365 cmd <= SIOCDEVPRIVATE + 15) ||
3366 cmd == SIOCBONDENSLAVE ||
3367 cmd == SIOCBONDRELEASE ||
3368 cmd == SIOCBONDSETHWADDR ||
3369 cmd == SIOCBONDSLAVEINFOQUERY ||
3370 cmd == SIOCBONDINFOQUERY ||
3371 cmd == SIOCBONDCHANGEACTIVE ||
3372 cmd == SIOCGMIIPHY ||
3373 cmd == SIOCGMIIREG ||
3374 cmd == SIOCSMIIREG ||
3375 cmd == SIOCBRADDIF ||
3376 cmd == SIOCBRDELIF ||
3377 cmd == SIOCWANDEV) {
3378 err = -EOPNOTSUPP;
3379 if (dev->do_ioctl) {
3380 if (netif_device_present(dev))
3381 err = dev->do_ioctl(dev, ifr,
3382 cmd);
3383 else
3384 err = -ENODEV;
3386 } else
3387 err = -EINVAL;
3390 return err;
3394 * This function handles all "interface"-type I/O control requests. The actual
3395 * 'doing' part of this is dev_ifsioc above.
3399 * dev_ioctl - network device ioctl
3400 * @net: the applicable net namespace
3401 * @cmd: command to issue
3402 * @arg: pointer to a struct ifreq in user space
3404 * Issue ioctl functions to devices. This is normally called by the
3405 * user space syscall interfaces but can sometimes be useful for
3406 * other purposes. The return value is the return from the syscall if
3407 * positive or a negative errno code on error.
3410 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3412 struct ifreq ifr;
3413 int ret;
3414 char *colon;
3416 /* One special case: SIOCGIFCONF takes ifconf argument
3417 and requires shared lock, because it sleeps writing
3418 to user space.
3421 if (cmd == SIOCGIFCONF) {
3422 rtnl_lock();
3423 ret = dev_ifconf(net, (char __user *) arg);
3424 rtnl_unlock();
3425 return ret;
3427 if (cmd == SIOCGIFNAME)
3428 return dev_ifname(net, (struct ifreq __user *)arg);
3430 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3431 return -EFAULT;
3433 ifr.ifr_name[IFNAMSIZ-1] = 0;
3435 colon = strchr(ifr.ifr_name, ':');
3436 if (colon)
3437 *colon = 0;
3440 * See which interface the caller is talking about.
3443 switch (cmd) {
3445 * These ioctl calls:
3446 * - can be done by all.
3447 * - atomic and do not require locking.
3448 * - return a value
3450 case SIOCGIFFLAGS:
3451 case SIOCGIFMETRIC:
3452 case SIOCGIFMTU:
3453 case SIOCGIFHWADDR:
3454 case SIOCGIFSLAVE:
3455 case SIOCGIFMAP:
3456 case SIOCGIFINDEX:
3457 case SIOCGIFTXQLEN:
3458 dev_load(net, ifr.ifr_name);
3459 read_lock(&dev_base_lock);
3460 ret = dev_ifsioc_locked(net, &ifr, cmd);
3461 read_unlock(&dev_base_lock);
3462 if (!ret) {
3463 if (colon)
3464 *colon = ':';
3465 if (copy_to_user(arg, &ifr,
3466 sizeof(struct ifreq)))
3467 ret = -EFAULT;
3469 return ret;
3471 case SIOCETHTOOL:
3472 dev_load(net, ifr.ifr_name);
3473 rtnl_lock();
3474 ret = dev_ethtool(net, &ifr);
3475 rtnl_unlock();
3476 if (!ret) {
3477 if (colon)
3478 *colon = ':';
3479 if (copy_to_user(arg, &ifr,
3480 sizeof(struct ifreq)))
3481 ret = -EFAULT;
3483 return ret;
3486 * These ioctl calls:
3487 * - require superuser power.
3488 * - require strict serialization.
3489 * - return a value
3491 case SIOCGMIIPHY:
3492 case SIOCGMIIREG:
3493 case SIOCSIFNAME:
3494 if (!capable(CAP_NET_ADMIN))
3495 return -EPERM;
3496 dev_load(net, ifr.ifr_name);
3497 rtnl_lock();
3498 ret = dev_ifsioc(net, &ifr, cmd);
3499 rtnl_unlock();
3500 if (!ret) {
3501 if (colon)
3502 *colon = ':';
3503 if (copy_to_user(arg, &ifr,
3504 sizeof(struct ifreq)))
3505 ret = -EFAULT;
3507 return ret;
3510 * These ioctl calls:
3511 * - require superuser power.
3512 * - require strict serialization.
3513 * - do not return a value
3515 case SIOCSIFFLAGS:
3516 case SIOCSIFMETRIC:
3517 case SIOCSIFMTU:
3518 case SIOCSIFMAP:
3519 case SIOCSIFHWADDR:
3520 case SIOCSIFSLAVE:
3521 case SIOCADDMULTI:
3522 case SIOCDELMULTI:
3523 case SIOCSIFHWBROADCAST:
3524 case SIOCSIFTXQLEN:
3525 case SIOCSMIIREG:
3526 case SIOCBONDENSLAVE:
3527 case SIOCBONDRELEASE:
3528 case SIOCBONDSETHWADDR:
3529 case SIOCBONDCHANGEACTIVE:
3530 case SIOCBRADDIF:
3531 case SIOCBRDELIF:
3532 if (!capable(CAP_NET_ADMIN))
3533 return -EPERM;
3534 /* fall through */
3535 case SIOCBONDSLAVEINFOQUERY:
3536 case SIOCBONDINFOQUERY:
3537 dev_load(net, ifr.ifr_name);
3538 rtnl_lock();
3539 ret = dev_ifsioc(net, &ifr, cmd);
3540 rtnl_unlock();
3541 return ret;
3543 case SIOCGIFMEM:
3544 /* Get the per device memory space. We can add this but
3545 * currently do not support it */
3546 case SIOCSIFMEM:
3547 /* Set the per device memory buffer space.
3548 * Not applicable in our case */
3549 case SIOCSIFLINK:
3550 return -EINVAL;
3553 * Unknown or private ioctl.
3555 default:
3556 if (cmd == SIOCWANDEV ||
3557 (cmd >= SIOCDEVPRIVATE &&
3558 cmd <= SIOCDEVPRIVATE + 15)) {
3559 dev_load(net, ifr.ifr_name);
3560 rtnl_lock();
3561 ret = dev_ifsioc(net, &ifr, cmd);
3562 rtnl_unlock();
3563 if (!ret && copy_to_user(arg, &ifr,
3564 sizeof(struct ifreq)))
3565 ret = -EFAULT;
3566 return ret;
3568 /* Take care of Wireless Extensions */
3569 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3570 return wext_handle_ioctl(net, &ifr, cmd, arg);
3571 return -EINVAL;
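/*
 * Editor's sketch, not part of the original file: the userspace side
 * of one of the set-type requests above; SIOCSIFMTU travels through
 * dev_ifsioc() into dev_set_mtu() and needs CAP_NET_ADMIN.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static int set_mtu(const char *name, int mtu)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	ifr.ifr_mtu = mtu;
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif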
3577 * dev_new_index - allocate an ifindex
3578 * @net: the applicable net namespace
3580 * Returns a suitable unique value for a new device interface
3581 * number. The caller must hold the rtnl semaphore or the
3582 * dev_base_lock to be sure it remains unique.
3584 static int dev_new_index(struct net *net)
3586 static int ifindex;
3587 for (;;) {
3588 if (++ifindex <= 0)
3589 ifindex = 1;
3590 if (!__dev_get_by_index(net, ifindex))
3591 return ifindex;
3595 /* Delayed registration/unregistration */
3596 static DEFINE_SPINLOCK(net_todo_list_lock);
3597 static LIST_HEAD(net_todo_list);
3599 static void net_set_todo(struct net_device *dev)
3601 spin_lock(&net_todo_list_lock);
3602 list_add_tail(&dev->todo_list, &net_todo_list);
3603 spin_unlock(&net_todo_list_lock);
3606 static void rollback_registered(struct net_device *dev)
3608 BUG_ON(dev_boot_phase);
3609 ASSERT_RTNL();
3611 /* Some devices call this without ever registering, for initialization unwind. */
3612 if (dev->reg_state == NETREG_UNINITIALIZED) {
3613 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3614 "was registered\n", dev->name, dev);
3616 WARN_ON(1);
3617 return;
3620 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3622 /* If device is running, close it first. */
3623 dev_close(dev);
3625 /* And unlink it from device chain. */
3626 unlist_netdevice(dev);
3628 dev->reg_state = NETREG_UNREGISTERING;
3630 synchronize_net();
3632 /* Shutdown queueing discipline. */
3633 dev_shutdown(dev);
3636 /* Notify protocols, that we are about to destroy
3637 this device. They should clean all the things.
3639 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3642 * Flush the unicast and multicast chains
3644 dev_addr_discard(dev);
3646 if (dev->uninit)
3647 dev->uninit(dev);
3649 /* Notifier chain MUST detach us from master device. */
3650 BUG_TRAP(!dev->master);
3652 /* Remove entries from kobject tree */
3653 netdev_unregister_kobject(dev);
3655 synchronize_net();
3657 dev_put(dev);
3661 * register_netdevice - register a network device
3662 * @dev: device to register
3664 * Take a completed network device structure and add it to the kernel
3665 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3666 * chain. 0 is returned on success. A negative errno code is returned
3667 * on a failure to set up the device, or if the name is a duplicate.
3669 * Callers must hold the rtnl semaphore. You may want
3670 * register_netdev() instead of this.
3672 * BUGS:
3673 * The locking appears insufficient to guarantee two parallel registers
3674 * will not get the same name.
3677 int register_netdevice(struct net_device *dev)
3679 struct hlist_head *head;
3680 struct hlist_node *p;
3681 int ret;
3682 struct net *net;
3684 BUG_ON(dev_boot_phase);
3685 ASSERT_RTNL();
3687 might_sleep();
3689 /* When net_device's are persistent, this will be fatal. */
3690 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3691 BUG_ON(!dev->nd_net);
3692 net = dev->nd_net;
3694 spin_lock_init(&dev->queue_lock);
3695 spin_lock_init(&dev->_xmit_lock);
3696 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
3697 dev->xmit_lock_owner = -1;
3698 spin_lock_init(&dev->ingress_lock);
3700 dev->iflink = -1;
3702 /* Init, if this function is available */
3703 if (dev->init) {
3704 ret = dev->init(dev);
3705 if (ret) {
3706 if (ret > 0)
3707 ret = -EIO;
3708 goto out;
3712 if (!dev_valid_name(dev->name)) {
3713 ret = -EINVAL;
3714 goto err_uninit;
3717 dev->ifindex = dev_new_index(net);
3718 if (dev->iflink == -1)
3719 dev->iflink = dev->ifindex;
3721 /* Check for existence of name */
3722 head = dev_name_hash(net, dev->name);
3723 hlist_for_each(p, head) {
3724 struct net_device *d
3725 = hlist_entry(p, struct net_device, name_hlist);
3726 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3727 ret = -EEXIST;
3728 goto err_uninit;
3732 /* Fix illegal checksum combinations */
3733 if ((dev->features & NETIF_F_HW_CSUM) &&
3734 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3735 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3736 dev->name);
3737 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3740 if ((dev->features & NETIF_F_NO_CSUM) &&
3741 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3742 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3743 dev->name);
3744 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3748 /* Fix illegal SG+CSUM combinations. */
3749 if ((dev->features & NETIF_F_SG) &&
3750 !(dev->features & NETIF_F_ALL_CSUM)) {
3751 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
3752 dev->name);
3753 dev->features &= ~NETIF_F_SG;
3756 /* TSO requires that SG is present as well. */
3757 if ((dev->features & NETIF_F_TSO) &&
3758 !(dev->features & NETIF_F_SG)) {
3759 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
3760 dev->name);
3761 dev->features &= ~NETIF_F_TSO;
3763 if (dev->features & NETIF_F_UFO) {
3764 if (!(dev->features & NETIF_F_HW_CSUM)) {
3765 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3766 "NETIF_F_HW_CSUM feature.\n",
3767 dev->name);
3768 dev->features &= ~NETIF_F_UFO;
3770 if (!(dev->features & NETIF_F_SG)) {
3771 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3772 "NETIF_F_SG feature.\n",
3773 dev->name);
3774 dev->features &= ~NETIF_F_UFO;
3778 ret = netdev_register_kobject(dev);
3779 if (ret)
3780 goto err_uninit;
3781 dev->reg_state = NETREG_REGISTERED;
3784 * Default initial state at registration is that the
3785 * device is present.
3788 set_bit(__LINK_STATE_PRESENT, &dev->state);
3790 dev_init_scheduler(dev);
3791 dev_hold(dev);
3792 list_netdevice(dev);
3794 /* Notify protocols, that a new device appeared. */
3795 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
3796 ret = notifier_to_errno(ret);
3797 if (ret) {
3798 rollback_registered(dev);
3799 dev->reg_state = NETREG_UNREGISTERED;
3802 out:
3803 return ret;
3805 err_uninit:
3806 if (dev->uninit)
3807 dev->uninit(dev);
3808 goto out;
3812 * register_netdev - register a network device
3813 * @dev: device to register
3815 * Take a completed network device structure and add it to the kernel
3816 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3817 * chain. 0 is returned on success. A negative errno code is returned
3818 * on a failure to set up the device, or if the name is a duplicate.
3820 * This is a wrapper around register_netdevice that takes the rtnl semaphore
3821 * and expands the device name if you passed a format string to
3822 * alloc_netdev.
3824 int register_netdev(struct net_device *dev)
3826 int err;
3828 rtnl_lock();
3831 * If the name is a format string the caller wants us to do a
3832 * name allocation.
3834 if (strchr(dev->name, '%')) {
3835 err = dev_alloc_name(dev, dev->name);
3836 if (err < 0)
3837 goto out;
3840 err = register_netdevice(dev);
3841 out:
3842 rtnl_unlock();
3843 return err;
3845 EXPORT_SYMBOL(register_netdev);
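/*
 * Editor's sketch, not part of the original file: the usual driver
 * lifecycle around these calls; my_setup and struct my_eth_priv are
 * hypothetical.
 */
#if 0
static int my_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct my_eth_priv), "eth%d", my_setup);
	if (!dev)
		return -ENOMEM;
	err = register_netdev(dev);	/* takes the RTNL itself */
	if (err) {
		free_netdev(dev);	/* legal before registration */
		return err;
	}
	return 0;
}

static void my_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes the RTNL itself */
	free_netdev(dev);		/* final put; freed once refs drop */
}
#endif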
3848 * netdev_wait_allrefs - wait until all references are gone.
3850 * This is called when unregistering network devices.
3852 * Any protocol or device that holds a reference should register
3853 * for netdevice notification, and cleanup and put back the
3854 * reference if they receive an UNREGISTER event.
3855 * We can get stuck here if buggy protocols don't correctly
3856 * call dev_put.
3858 static void netdev_wait_allrefs(struct net_device *dev)
3860 unsigned long rebroadcast_time, warning_time;
3862 rebroadcast_time = warning_time = jiffies;
3863 while (atomic_read(&dev->refcnt) != 0) {
3864 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
3865 rtnl_lock();
3867 /* Rebroadcast unregister notification */
3868 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3870 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3871 &dev->state)) {
3872 /* We must not have linkwatch events
3873 * pending on unregister. If this
3874 * happens, we simply run the queue
3875 * unscheduled, resulting in a noop
3876 * for this device.
3878 linkwatch_run_queue();
3881 __rtnl_unlock();
3883 rebroadcast_time = jiffies;
3886 msleep(250);
3888 if (time_after(jiffies, warning_time + 10 * HZ)) {
3889 printk(KERN_EMERG "unregister_netdevice: "
3890 "waiting for %s to become free. Usage "
3891 "count = %d\n",
3892 dev->name, atomic_read(&dev->refcnt));
3893 warning_time = jiffies;
3898 /* The sequence is:
3900 * rtnl_lock();
3901 * ...
3902 * register_netdevice(x1);
3903 * register_netdevice(x2);
3904 * ...
3905 * unregister_netdevice(y1);
3906 * unregister_netdevice(y2);
3907 * ...
3908 * rtnl_unlock();
3909 * free_netdev(y1);
3910 * free_netdev(y2);
3912 * We are invoked by rtnl_unlock() after it drops the semaphore.
3913 * This allows us to deal with problems:
3914 * 1) We can delete sysfs objects which invoke hotplug
3915 * without deadlocking with linkwatch via keventd.
3916 * 2) Since we run with the RTNL semaphore not held, we can sleep
3917 * safely in order to wait for the netdev refcnt to drop to zero.
3919 static DEFINE_MUTEX(net_todo_run_mutex);
3920 void netdev_run_todo(void)
3922 struct list_head list;
3924 /* Need to guard against multiple CPUs getting out of order. */
3925 mutex_lock(&net_todo_run_mutex);
3927 /* Not safe to do outside the semaphore. We must not return
3928 * until all unregister events invoked by the local processor
3929 * have been completed (either by this todo run, or one on
3930 * another cpu).
3932 if (list_empty(&net_todo_list))
3933 goto out;
3935 /* Snapshot list, allow later requests */
3936 spin_lock(&net_todo_list_lock);
3937 list_replace_init(&net_todo_list, &list);
3938 spin_unlock(&net_todo_list_lock);
3940 while (!list_empty(&list)) {
3941 struct net_device *dev
3942 = list_entry(list.next, struct net_device, todo_list);
3943 list_del(&dev->todo_list);
3945 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
3946 printk(KERN_ERR "network todo '%s' but state %d\n",
3947 dev->name, dev->reg_state);
3948 dump_stack();
3949 continue;
3952 dev->reg_state = NETREG_UNREGISTERED;
3954 netdev_wait_allrefs(dev);
3956 /* paranoia */
3957 BUG_ON(atomic_read(&dev->refcnt));
3958 BUG_TRAP(!dev->ip_ptr);
3959 BUG_TRAP(!dev->ip6_ptr);
3960 BUG_TRAP(!dev->dn_ptr);
3962 if (dev->destructor)
3963 dev->destructor(dev);
3965 /* Free network device */
3966 kobject_put(&dev->dev.kobj);
3969 out:
3970 mutex_unlock(&net_todo_run_mutex);
3973 static struct net_device_stats *internal_stats(struct net_device *dev)
3975 return &dev->stats;
3979 * alloc_netdev_mq - allocate network device
3980 * @sizeof_priv: size of private data to allocate space for
3981 * @name: device name format string
3982 * @setup: callback to initialize device
3983 * @queue_count: the number of subqueues to allocate
3985 * Allocates a struct net_device with private data area for driver use
3986 * and performs basic initialization. Also allocates subqueue structs
3987 * for each queue on the device at the end of the netdevice.
3989 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3990 void (*setup)(struct net_device *), unsigned int queue_count)
3992 void *p;
3993 struct net_device *dev;
3994 int alloc_size;
3996 BUG_ON(strlen(name) >= sizeof(dev->name));
3998 /* ensure 32-byte alignment of both the device and private area */
3999 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
4000 (sizeof(struct net_device_subqueue) * (queue_count - 1))) &
4001 ~NETDEV_ALIGN_CONST;
4002 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
4004 p = kzalloc(alloc_size, GFP_KERNEL);
4005 if (!p) {
4006 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4007 return NULL;
4010 dev = (struct net_device *)
4011 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4012 dev->padded = (char *)dev - (char *)p;
4013 dev->nd_net = &init_net;
4015 if (sizeof_priv) {
4016 dev->priv = ((char *)dev +
4017 ((sizeof(struct net_device) +
4018 (sizeof(struct net_device_subqueue) *
4019 (queue_count - 1)) + NETDEV_ALIGN_CONST)
4020 & ~NETDEV_ALIGN_CONST));
4023 dev->egress_subqueue_count = queue_count;
4025 dev->get_stats = internal_stats;
4026 netpoll_netdev_init(dev);
4027 setup(dev);
4028 strcpy(dev->name, name);
4029 return dev;
4031 EXPORT_SYMBOL(alloc_netdev_mq);
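/*
 * Editor's sketch, not part of the original file: a multiqueue
 * allocation; the private area sits after the subqueue array laid out
 * above and is reached with netdev_priv(). my_setup and struct
 * my_mq_priv are hypothetical.
 */
#if 0
static struct net_device *my_alloc(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct my_mq_priv), "myeth%d",
			      my_setup, 4);	/* 4 TX subqueues */
	if (dev) {
		struct my_mq_priv *priv = netdev_priv(dev);
		priv->dev = dev;		/* hypothetical field */
	}
	return dev;
}
#endif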
4034 * free_netdev - free network device
4035 * @dev: device
4037 * This function does the last stage of destroying an allocated device
4038 * interface. The reference to the device object is released.
4039 * If this is the last reference then it will be freed.
4041 void free_netdev(struct net_device *dev)
4043 /* Compatibility with error handling in drivers */
4044 if (dev->reg_state == NETREG_UNINITIALIZED) {
4045 kfree((char *)dev - dev->padded);
4046 return;
4049 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4050 dev->reg_state = NETREG_RELEASED;
4052 /* will free via device release */
4053 put_device(&dev->dev);
4056 /* Synchronize with packet receive processing. */
4057 void synchronize_net(void)
4059 might_sleep();
4060 synchronize_rcu();
4064 * unregister_netdevice - remove device from the kernel
4065 * @dev: device
4067 * This function shuts down a device interface and removes it
4068 * from the kernel tables.
4070 * Callers must hold the rtnl semaphore. You may want
4071 * unregister_netdev() instead of this.
4074 void unregister_netdevice(struct net_device *dev)
4076 ASSERT_RTNL();
4078 rollback_registered(dev);
4079 /* Finish processing unregister after unlock */
4080 net_set_todo(dev);
4084 * unregister_netdev - remove device from the kernel
4085 * @dev: device
4087 * This function shuts down a device interface and removes it
4088 * from the kernel tables.
4090 * This is just a wrapper for unregister_netdevice that takes
4091 * the rtnl semaphore. In general you want to use this and not
4092 * unregister_netdevice.
4094 void unregister_netdev(struct net_device *dev)
4096 rtnl_lock();
4097 unregister_netdevice(dev);
4098 rtnl_unlock();
4101 EXPORT_SYMBOL(unregister_netdev);
4104 * dev_change_net_namespace - move device to a different network namespace
4105 * @dev: device
4106 * @net: network namespace
4107 * @pat: If not NULL name pattern to try if the current device name
4108 * is already taken in the destination network namespace.
4110 * This function shuts down a device interface and moves it
4111 * to a new network namespace. On success 0 is returned, on
4112 * a failure a negative errno code is returned.
4114 * Callers must hold the rtnl semaphore.
4117 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4119 char buf[IFNAMSIZ];
4120 const char *destname;
4121 int err;
4123 ASSERT_RTNL();
4125 /* Don't allow namespace local devices to be moved. */
4126 err = -EINVAL;
4127 if (dev->features & NETIF_F_NETNS_LOCAL)
4128 goto out;
4130 /* Ensure the device has been registered */
4131 err = -EINVAL;
4132 if (dev->reg_state != NETREG_REGISTERED)
4133 goto out;
4135 /* Get out if there is nothing to do */
4136 err = 0;
4137 if (dev->nd_net == net)
4138 goto out;
4140 /* Pick the destination device name, and ensure
4141 * we can use it in the destination network namespace.
4143 err = -EEXIST;
4144 destname = dev->name;
4145 if (__dev_get_by_name(net, destname)) {
4146 /* We get here if we can't use the current device name */
4147 if (!pat)
4148 goto out;
4149 if (!dev_valid_name(pat))
4150 goto out;
4151 if (strchr(pat, '%')) {
4152 if (__dev_alloc_name(net, pat, buf) < 0)
4153 goto out;
4154 destname = buf;
4155 } else
4156 destname = pat;
4157 if (__dev_get_by_name(net, destname))
4158 goto out;
4162 * And now a mini version of register_netdevice() and unregister_netdevice().
4165 /* If device is running close it first. */
4166 dev_close(dev);
4168 /* And unlink it from device chain */
4169 err = -ENODEV;
4170 unlist_netdevice(dev);
4172 synchronize_net();
4174 /* Shutdown queueing discipline. */
4175 dev_shutdown(dev);
4177 /* Notify protocols, that we are about to destroy
4178 this device. They should clean all the things.
4180 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4183 * Flush the unicast and multicast chains
4185 dev_addr_discard(dev);
4187 /* Actually switch the network namespace */
4188 dev->nd_net = net;
4190 /* Assign the new device name */
4191 if (destname != dev->name)
4192 strcpy(dev->name, destname);
4194 /* If there is an ifindex conflict assign a new one */
4195 if (__dev_get_by_index(net, dev->ifindex)) {
4196 int iflink = (dev->iflink == dev->ifindex);
4197 dev->ifindex = dev_new_index(net);
4198 if (iflink)
4199 dev->iflink = dev->ifindex;
4202 /* Fixup kobjects */
4203 err = device_rename(&dev->dev, dev->name);
4204 WARN_ON(err);
4206 /* Add the device back in the hashes */
4207 list_netdevice(dev);
4209 /* Notify protocols, that a new device appeared. */
4210 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4212 synchronize_net();
4213 err = 0;
4214 out:
4215 return err;
4218 static int dev_cpu_callback(struct notifier_block *nfb,
4219 unsigned long action,
4220 void *ocpu)
4222 struct sk_buff **list_skb;
4223 struct net_device **list_net;
4224 struct sk_buff *skb;
4225 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4226 struct softnet_data *sd, *oldsd;
4228 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4229 return NOTIFY_OK;
4231 local_irq_disable();
4232 cpu = smp_processor_id();
4233 sd = &per_cpu(softnet_data, cpu);
4234 oldsd = &per_cpu(softnet_data, oldcpu);
4236 /* Find end of our completion_queue. */
4237 list_skb = &sd->completion_queue;
4238 while (*list_skb)
4239 list_skb = &(*list_skb)->next;
4240 /* Append completion queue from offline CPU. */
4241 *list_skb = oldsd->completion_queue;
4242 oldsd->completion_queue = NULL;
4244 /* Find end of our output_queue. */
4245 list_net = &sd->output_queue;
4246 while (*list_net)
4247 list_net = &(*list_net)->next_sched;
4248 /* Append output queue from offline CPU. */
4249 *list_net = oldsd->output_queue;
4250 oldsd->output_queue = NULL;
4252 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4253 local_irq_enable();
4255 /* Process offline CPU's input_pkt_queue */
4256 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4257 netif_rx(skb);
4259 return NOTIFY_OK;
4262 #ifdef CONFIG_NET_DMA
4264 * net_dma_rebalance - try to maintain one DMA channel per CPU
4265 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4267 * This is called when the number of channels allocated to the net_dma client
4268 * changes. The net_dma client tries to have one DMA channel per CPU.
4271 static void net_dma_rebalance(struct net_dma *net_dma)
4273 unsigned int cpu, i, n, chan_idx;
4274 struct dma_chan *chan;
4276 if (cpus_empty(net_dma->channel_mask)) {
4277 for_each_online_cpu(cpu)
4278 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4279 return;
4282 i = 0;
4283 cpu = first_cpu(cpu_online_map);
4285 for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
4286 chan = net_dma->channels[chan_idx];
4288 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4289 + (i < (num_online_cpus() %
4290 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4292 while (n) {
4293 per_cpu(softnet_data, cpu).net_dma = chan;
4294 cpu = next_cpu(cpu, cpu_online_map);
4295 n--;
4297 i++;
4302 * netdev_dma_event - event callback for the net_dma_client
4303 * @client: should always be net_dma_client
4304 * @chan: DMA channel for the event
4305 * @state: DMA state to be handled
4307 static enum dma_state_client
4308 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4309 enum dma_state state)
4311 int i, found = 0, pos = -1;
4312 struct net_dma *net_dma =
4313 container_of(client, struct net_dma, client);
4314 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4316 spin_lock(&net_dma->lock);
4317 switch (state) {
4318 case DMA_RESOURCE_AVAILABLE:
4319 for (i = 0; i < NR_CPUS; i++)
4320 if (net_dma->channels[i] == chan) {
4321 found = 1;
4322 break;
4323 } else if (net_dma->channels[i] == NULL && pos < 0)
4324 pos = i;
4326 if (!found && pos >= 0) {
4327 ack = DMA_ACK;
4328 net_dma->channels[pos] = chan;
4329 cpu_set(pos, net_dma->channel_mask);
4330 net_dma_rebalance(net_dma);
4332 break;
4333 case DMA_RESOURCE_REMOVED:
4334 for (i = 0; i < NR_CPUS; i++)
4335 if (net_dma->channels[i] == chan) {
4336 found = 1;
4337 pos = i;
4338 break;
4341 if (found) {
4342 ack = DMA_ACK;
4343 cpu_clear(pos, net_dma->channel_mask);
4344 net_dma->channels[i] = NULL;
4345 net_dma_rebalance(net_dma);
4347 break;
4348 default:
4349 break;
4351 spin_unlock(&net_dma->lock);
4353 return ack;
4357 * netdev_dma_register - register the networking subsystem as a DMA client
4359 static int __init netdev_dma_register(void)
4361 spin_lock_init(&net_dma.lock);
4362 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4363 dma_async_client_register(&net_dma.client);
4364 dma_async_client_chan_request(&net_dma.client);
4365 return 0;
4368 #else
4369 static int __init netdev_dma_register(void) { return -ENODEV; }
4370 #endif /* CONFIG_NET_DMA */
4373 * netdev_compute_features - compute conjunction of two feature sets
4374 * @all: first feature set
4375 * @one: second feature set
4377 * Computes a new feature set after adding a device with feature set
4378 * @one to the master device with current feature set @all. Returns
4379 * the new feature set.
4381 int netdev_compute_features(unsigned long all, unsigned long one)
        /* if device needs checksumming, downgrade to hw checksumming */
        if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
                all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;

        /* if device can't do all checksum, downgrade to ipv4/ipv6
         * (the XOR relies on @all never mixing NETIF_F_HW_CSUM with
         * the per-protocol checksum bits) */
        if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
                all ^= NETIF_F_HW_CSUM
                        | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        if (one & NETIF_F_GSO)
                one |= NETIF_F_GSO_SOFTWARE;
        one |= NETIF_F_GSO;

        /* If even one device supports robust GSO, enable it for all. */
        if (one & NETIF_F_GSO_ROBUST)
                all |= NETIF_F_GSO_ROBUST;

        all &= one | NETIF_F_LLTX;

        /* scatter/gather is useless without checksum offload, and GSO
         * is useless without scatter/gather */
        if (!(all & NETIF_F_ALL_CSUM))
                all &= ~NETIF_F_SG;
        if (!(all & NETIF_F_SG))
                all &= ~NETIF_F_GSO_MASK;

        return all;
}
EXPORT_SYMBOL(netdev_compute_features);
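
/*
 * Usage sketch (illustrative; "master" and "slave" stand for e.g. a
 * bonding device and its ports): start from the master's current set
 * and fold in each slave:
 *
 *      unsigned long features = master->features;
 *
 *      for each slave:
 *              features = netdev_compute_features(features,
 *                                                 slave->features);
 *      master->features = features;
 *
 * A slave offering only NETIF_F_IP_CSUM drags a NETIF_F_HW_CSUM master
 * down to NETIF_F_IP_CSUM, and a slave with no checksum support at all
 * also costs the master NETIF_F_SG and every NETIF_F_GSO_MASK bit.
 */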

static struct hlist_head *netdev_create_hash(void)
{
        int i;
        struct hlist_head *hash;

        hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
        if (hash != NULL)
                for (i = 0; i < NETDEV_HASHENTRIES; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
        INIT_LIST_HEAD(&net->dev_base_head);

        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
                goto err_name;

        net->dev_index_head = netdev_create_hash();
        if (net->dev_index_head == NULL)
                goto err_idx;

        return 0;

err_idx:
        kfree(net->dev_name_head);
err_name:
        return -ENOMEM;
}

static void __net_exit netdev_exit(struct net *net)
{
        kfree(net->dev_name_head);
        kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
        .init = netdev_init,
        .exit = netdev_exit,
};
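
/*
 * For reference, the pernet pattern in one place (hypothetical "foo"
 * subsystem, comment only): fill a pernet_operations with an init hook
 * and an exit hook, then hand it to the core:
 *
 *      static struct pernet_operations foo_net_ops = {
 *              .init = foo_net_init,
 *              .exit = foo_net_exit,
 *      };
 *
 *      register_pernet_subsys(&foo_net_ops);
 *
 * register_pernet_subsys() runs .init for every namespace that already
 * exists and for each one created later; unregister_pernet_subsys()
 * runs .exit everywhere and removes the hooks.
 */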

static void __net_exit default_device_exit(struct net *net)
{
        struct net_device *dev, *next;

        /*
         * Push all migratable network devices back to the
         * initial network namespace
         */
        rtnl_lock();
        for_each_netdev_safe(net, dev, next) {
                int err;

                /* Ignore devices that are pinned to their namespace
                 * (e.g. loopback) */
                if (dev->features & NETIF_F_NETNS_LOCAL)
                        continue;

                /* Push remaining network devices to init_net */
                err = dev_change_net_namespace(dev, &init_net, "dev%d");
                if (err) {
                        printk(KERN_WARNING "%s: failed to move %s to init_net: %d\n",
                                __func__, dev->name, err);
                        unregister_netdevice(dev);
                }
        }
        rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
        .exit = default_device_exit,
};
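
/*
 * Drivers opt out of this migration by marking the device as local to
 * its namespace at setup time, as the loopback driver does:
 *
 *      dev->features |= NETIF_F_NETNS_LOCAL;
 *
 * Such devices are never pushed to init_net; they are torn down with
 * the namespace itself.
 */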

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
        int i, rc = -ENOMEM;

        BUG_ON(!dev_boot_phase);

        if (dev_proc_init())
                goto out;

        if (netdev_kobject_init())
                goto out;

        INIT_LIST_HEAD(&ptype_all);
        for (i = 0; i < PTYPE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&ptype_base[i]);

        if (register_pernet_subsys(&netdev_net_ops))
                goto out;

        if (register_pernet_device(&default_device_ops))
                goto out;

        /*
         * Initialise the packet receive queues.
         */
        for_each_possible_cpu(i) {
                struct softnet_data *queue;

                queue = &per_cpu(softnet_data, i);
                skb_queue_head_init(&queue->input_pkt_queue);
                queue->completion_queue = NULL;
                INIT_LIST_HEAD(&queue->poll_list);

                queue->backlog.poll = process_backlog;
                queue->backlog.weight = weight_p;
        }

        netdev_dma_register();

        dev_boot_phase = 0;

        open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

        hotcpu_notifier(dev_cpu_callback, 0);
        dst_init();
        dev_mcast_init();
        rc = 0;
out:
        return rc;
}

subsys_initcall(net_dev_init);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);