/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer	:	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
	spin_lock(&sd->input_pkt_queue.lock);
}

static inline void rps_unlock(struct softnet_data *sd)
{
	spin_unlock(&sd->input_pkt_queue.lock);
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
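
/* Illustrative sketch, not part of the original file: how a module might use
 * dev_add_pack()/dev_remove_pack(). The handler and variable names below are
 * hypothetical; struct packet_type, ETH_P_ALL and the two registration calls
 * are the real APIs documented above.
 */
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* the tap owns this skb reference; just drop it in the sketch */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_tap __read_mostly = {
	.type = htons(ETH_P_ALL),	/* tap every protocol */
	.func = example_tap_rcv,
};

static void example_tap_register(void)
{
	/* linked into ptype_all until dev_remove_pack(&example_tap) */
	dev_add_pack(&example_tap);
}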
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
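
/* Worked example (illustrative, not part of the original file): booting with
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * makes get_options() above fill ints[] with { 4, 9, 0x300, 0xd0000, 0xd4000 }
 * and leave str pointing at "eth0", so netdev_boot_setup_add() records
 * irq=9, base_addr=0x300, mem_start=0xd0000, mem_end=0xd4000 for the device
 * named "eth0", which a driver can later pick up via netdev_boot_setup_check().
 */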
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of a interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */
int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: the packet
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
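
/* Illustrative sketch, not part of the original file: the two lookup flavours
 * above differ only in reference handling. The helper below is hypothetical;
 * dev_get_by_name(), dev_get_by_name_rcu(), dev_put() and the RCU read lock
 * are the real APIs being demonstrated.
 */
static bool example_interface_is_up(struct net *net, const char *name)
{
	struct net_device *dev;
	bool up = false;

	/* refcounted lookup: usable from any context, must be balanced */
	dev = dev_get_by_name(net, name);
	if (dev) {
		up = !!(dev->flags & IFF_UP);
		dev_put(dev);
	}

	/* lock-only lookup: no reference taken, pointer only valid inside
	 * the RCU read-side critical section
	 */
	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		up = !!(dev->flags & IFF_UP);
	rcu_read_unlock();

	return up;
}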
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
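
/* Worked example (illustrative, not part of the original file): with the
 * checks above, dev_valid_name("eth0") and dev_valid_name("br-lan") return
 * true, while "", ".", "..", "my/if", "a:b", names containing whitespace and
 * any name of IFNAMSIZ (16) or more characters are all rejected.
 */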
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
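
/* Illustrative sketch, not part of the original file: a driver registering a
 * device with a wildcard name typically does something like the hypothetical
 * helper below; dev_alloc_name() and the "%d" format handling are the real
 * API shown above.
 */
static int example_pick_name(struct net_device *dev)
{
	int unit;

	unit = dev_alloc_name(dev, "dummy%d");	/* e.g. becomes "dummy0" */
	if (unit < 0)
		return unit;	/* -EINVAL, -ENFILE, ... */

	return 0;
}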
int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
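
/* Illustrative sketch, not part of the original file: bringing an interface
 * up and down from kernel code. The helper is hypothetical; dev_open(),
 * dev_close(), rtnl_lock()/rtnl_unlock() and __dev_get_by_name() are the real
 * APIs, and both state changes must run under the RTNL lock.
 */
static int example_cycle_interface(struct net *net, const char *name)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev) {
		err = dev_open(dev);	/* nop if already IFF_UP */
		if (!err)
			dev_close(dev);
	}
	rtnl_unlock();
	return err;
}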
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
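
/* Illustrative sketch, not part of the original file: a minimal consumer of
 * the notifier chain above. The callback and block names are hypothetical;
 * register_netdevice_notifier(), unregister_netdevice_notifier(),
 * netdev_notifier_info_to_dev() and the NETDEV_* events are the real API.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "example notifier: device is up\n");
		break;
	case NETDEV_GOING_DOWN:
		netdev_info(dev, "example notifier: device going down\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};
/* registered with register_netdevice_notifier(&example_netdev_nb) and torn
 * down with unregister_netdevice_notifier(&example_netdev_nb)
 */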
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return call_netdevice_notifiers_info(val, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}

static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, cpu, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* We need to explicitly update tci as prevous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = cpu * num_tc + tc;

		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;

			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
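
/* Example (illustrative sketch, compiled out): how a multiqueue driver
 * would typically combine netif_get_num_default_rss_queues() with the
 * netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() helpers
 * above at probe time. The foo_* name is hypothetical.
 */
#if 0
static int foo_init_queues(struct net_device *dev)
{
	unsigned int n = min_t(unsigned int, dev->num_tx_queues,
			       netif_get_num_default_rss_queues());
	int err;

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, n);
}
#endif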
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
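
/* Example (illustrative sketch, compiled out): a driver's TX completion
 * handler may run in hard-IRQ context and therefore frees transmitted skbs
 * with dev_consume_skb_any()/dev_kfree_skb_any(), which route through
 * __dev_kfree_skb_any() above. The foo_* names are hypothetical.
 */
#if 0
static void foo_clean_tx_irq(struct foo_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = foo_next_completed_skb(ring)) != NULL)
		dev_consume_skb_any(skb);	/* deferred to softirq if needed */
}
#endif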
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
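
/* Example (illustrative sketch, compiled out): drivers typically pair
 * netif_device_detach()/netif_device_attach() around suspend and resume so
 * the core stops handing them packets while the hardware is powered down.
 * The foo_* names are hypothetical.
 */
#if 0
static int foo_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	/* ... power the hardware down ... */
	return 0;
}

static int foo_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	/* ... power the hardware back up ... */
	netif_device_attach(dev);
	return 0;
}
#endif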
/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
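
/* Example (illustrative sketch, compiled out): a driver that wants the
 * default hash-based distribution from its .ndo_select_queue() can call
 * skb_tx_hash(), the wrapper around __skb_tx_hash() that passes
 * dev->real_num_tx_queues. The foo_* name and the ndo signature shown here
 * are assumptions for this kernel generation.
 */
#if 0
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	return skb_tx_hash(dev, skb);
}
#endif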
2608 static void skb_warn_bad_offload(const struct sk_buff
*skb
)
2610 static const netdev_features_t null_features
;
2611 struct net_device
*dev
= skb
->dev
;
2612 const char *name
= "";
2614 if (!net_ratelimit())
2618 if (dev
->dev
.parent
)
2619 name
= dev_driver_string(dev
->dev
.parent
);
2621 name
= netdev_name(dev
);
2623 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2624 "gso_type=%d ip_summed=%d\n",
2625 name
, dev
? &dev
->features
: &null_features
,
2626 skb
->sk
? &skb
->sk
->sk_route_caps
: &null_features
,
2627 skb
->len
, skb
->data_len
, skb_shinfo(skb
)->gso_size
,
2628 skb_shinfo(skb
)->gso_type
, skb
->ip_summed
);
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
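
/* Example (illustrative sketch, compiled out): a driver whose hardware
 * cannot checksum a particular frame can fall back to skb_checksum_help()
 * in its xmit path before posting the frame. The foo_* names are
 * hypothetical.
 */
#if 0
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !foo_hw_can_csum(skb) && skb_checksum_help(skb))
		goto drop;
	/* ... post the frame to the hardware ring ... */
	return NETDEV_TX_OK;
drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
#endif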
2679 int skb_crc32c_csum_help(struct sk_buff
*skb
)
2682 int ret
= 0, offset
, start
;
2684 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
2687 if (unlikely(skb_is_gso(skb
)))
2690 /* Before computing a checksum, we should make sure no frag could
2691 * be modified by an external entity : checksum could be wrong.
2693 if (unlikely(skb_has_shared_frag(skb
))) {
2694 ret
= __skb_linearize(skb
);
2698 start
= skb_checksum_start_offset(skb
);
2699 offset
= start
+ offsetof(struct sctphdr
, checksum
);
2700 if (WARN_ON_ONCE(offset
>= skb_headlen(skb
))) {
2704 if (skb_cloned(skb
) &&
2705 !skb_clone_writable(skb
, offset
+ sizeof(__le32
))) {
2706 ret
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2710 crc32c_csum
= cpu_to_le32(~__skb_checksum(skb
, start
,
2711 skb
->len
- start
, ~(__u32
)0,
2713 *(__le32
*)(skb
->data
+ offset
) = crc32c_csum
;
2714 skb
->ip_summed
= CHECKSUM_NONE
;
2715 skb
->csum_not_inet
= 0;
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}
2784 * __skb_gso_segment - Perform segmentation on skb.
2785 * @skb: buffer to segment
2786 * @features: features for the output path (see dev->features)
2787 * @tx_path: whether it is called in TX path
2789 * This function segments the given skb and returns a list of segments.
2791 * It may return NULL if the skb requires no segmentation. This is
2792 * only possible when GSO is used for verifying header integrity.
2794 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2796 struct sk_buff
*__skb_gso_segment(struct sk_buff
*skb
,
2797 netdev_features_t features
, bool tx_path
)
2799 struct sk_buff
*segs
;
2801 if (unlikely(skb_needs_check(skb
, tx_path
))) {
2804 /* We're going to init ->check field in TCP or UDP header */
2805 err
= skb_cow_head(skb
, 0);
2807 return ERR_PTR(err
);
2810 /* Only report GSO partial support if it will enable us to
2811 * support segmentation on this frame without needing additional
2814 if (features
& NETIF_F_GSO_PARTIAL
) {
2815 netdev_features_t partial_features
= NETIF_F_GSO_ROBUST
;
2816 struct net_device
*dev
= skb
->dev
;
2818 partial_features
|= dev
->features
& dev
->gso_partial_features
;
2819 if (!skb_gso_ok(skb
, features
| partial_features
))
2820 features
&= ~NETIF_F_GSO_PARTIAL
;
2823 BUILD_BUG_ON(SKB_SGO_CB_OFFSET
+
2824 sizeof(*SKB_GSO_CB(skb
)) > sizeof(skb
->cb
));
2826 SKB_GSO_CB(skb
)->mac_offset
= skb_headroom(skb
);
2827 SKB_GSO_CB(skb
)->encap_level
= 0;
2829 skb_reset_mac_header(skb
);
2830 skb_reset_mac_len(skb
);
2832 segs
= skb_mac_gso_segment(skb
, features
);
2834 if (unlikely(skb_needs_check(skb
, tx_path
) && !IS_ERR(segs
)))
2835 skb_warn_bad_offload(skb
);
2839 EXPORT_SYMBOL(__skb_gso_segment
);
2841 /* Take action when hardware reception checksum errors are detected. */
2843 void netdev_rx_csum_fault(struct net_device
*dev
)
2845 if (net_ratelimit()) {
2846 pr_err("%s: hw csum failure\n", dev
? dev
->name
: "<unknown>");
2850 EXPORT_SYMBOL(netdev_rx_csum_fault
);
2853 /* Actually, we should eliminate this check as soon as we know, that:
2854 * 1. IOMMU is present and allows to map all the memory.
2855 * 2. No high memory really exists on this machine.
2858 static int illegal_highdma(struct net_device
*dev
, struct sk_buff
*skb
)
2860 #ifdef CONFIG_HIGHMEM
2863 if (!(dev
->features
& NETIF_F_HIGHDMA
)) {
2864 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2865 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2867 if (PageHighMem(skb_frag_page(frag
)))
2872 if (PCI_DMA_BUS_IS_PHYS
) {
2873 struct device
*pdev
= dev
->dev
.parent
;
2877 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2878 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2879 dma_addr_t addr
= page_to_phys(skb_frag_page(frag
));
2881 if (!pdev
->dma_mask
|| addr
+ PAGE_SIZE
- 1 > *pdev
->dma_mask
)
2889 /* If MPLS offload request, verify we are testing hardware MPLS features
2890 * instead of standard features for the netdev.
2892 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2893 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
2894 netdev_features_t features
,
2897 if (eth_p_mpls(type
))
2898 features
&= skb
->dev
->mpls_features
;
2903 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
2904 netdev_features_t features
,
2911 static netdev_features_t
harmonize_features(struct sk_buff
*skb
,
2912 netdev_features_t features
)
2917 type
= skb_network_protocol(skb
, &tmp
);
2918 features
= net_mpls_features(skb
, features
, type
);
2920 if (skb
->ip_summed
!= CHECKSUM_NONE
&&
2921 !can_checksum_protocol(features
, type
)) {
2922 features
&= ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
2924 if (illegal_highdma(skb
->dev
, skb
))
2925 features
&= ~NETIF_F_SG
;
2930 netdev_features_t
passthru_features_check(struct sk_buff
*skb
,
2931 struct net_device
*dev
,
2932 netdev_features_t features
)
2936 EXPORT_SYMBOL(passthru_features_check
);
2938 static netdev_features_t
dflt_features_check(const struct sk_buff
*skb
,
2939 struct net_device
*dev
,
2940 netdev_features_t features
)
2942 return vlan_features_check(skb
, features
);
2945 static netdev_features_t
gso_features_check(const struct sk_buff
*skb
,
2946 struct net_device
*dev
,
2947 netdev_features_t features
)
2949 u16 gso_segs
= skb_shinfo(skb
)->gso_segs
;
2951 if (gso_segs
> dev
->gso_max_segs
)
2952 return features
& ~NETIF_F_GSO_MASK
;
2954 /* Support for GSO partial features requires software
2955 * intervention before we can actually process the packets
2956 * so we need to strip support for any partial features now
2957 * and we can pull them back in after we have partially
2958 * segmented the frame.
2960 if (!(skb_shinfo(skb
)->gso_type
& SKB_GSO_PARTIAL
))
2961 features
&= ~dev
->gso_partial_features
;
2963 /* Make sure to clear the IPv4 ID mangling feature if the
2964 * IPv4 header has the potential to be fragmented.
2966 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
2967 struct iphdr
*iph
= skb
->encapsulation
?
2968 inner_ip_hdr(skb
) : ip_hdr(skb
);
2970 if (!(iph
->frag_off
& htons(IP_DF
)))
2971 features
&= ~NETIF_F_TSO_MANGLEID
;
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
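
/* Example (illustrative sketch, compiled out): netif_skb_features() gives a
 * driver's .ndo_features_check() the chance to drop offload bits per skb. A
 * device that cannot checksum frames with deep headers might do something
 * like the following; the foo_* name and the 128-byte limit are assumptions
 * for illustration only.
 */
#if 0
static netdev_features_t foo_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	if (skb_transport_offset(skb) > 128)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
#endif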
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
3073 static struct sk_buff
*validate_xmit_skb(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3075 netdev_features_t features
;
3077 features
= netif_skb_features(skb
);
3078 skb
= validate_xmit_vlan(skb
, features
);
3082 if (netif_needs_gso(skb
, features
)) {
3083 struct sk_buff
*segs
;
3085 segs
= skb_gso_segment(skb
, features
);
3093 if (skb_needs_linearize(skb
, features
) &&
3094 __skb_linearize(skb
))
3097 /* If packet is not checksummed and device does not
3098 * support checksumming for this protocol, complete
3099 * checksumming here.
3101 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
3102 if (skb
->encapsulation
)
3103 skb_set_inner_transport_header(skb
,
3104 skb_checksum_start_offset(skb
));
3106 skb_set_transport_header(skb
,
3107 skb_checksum_start_offset(skb
));
3108 if (skb_csum_hwoffload_help(skb
, features
))
3113 skb
= validate_xmit_xfrm(skb
, features
, again
);
3120 atomic_long_inc(&dev
->tx_dropped
);
3124 struct sk_buff
*validate_xmit_skb_list(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3126 struct sk_buff
*next
, *head
= NULL
, *tail
;
3128 for (; skb
!= NULL
; skb
= next
) {
3132 /* in case skb wont be segmented, point to itself */
3135 skb
= validate_xmit_skb(skb
, dev
, again
);
3143 /* If skb was segmented, skb->prev points to
3144 * the last segment. If not, it still contains skb.
3150 EXPORT_SYMBOL_GPL(validate_xmit_skb_list
);
3152 static void qdisc_pkt_len_init(struct sk_buff
*skb
)
3154 const struct skb_shared_info
*shinfo
= skb_shinfo(skb
);
3156 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
3158 /* To get more precise estimation of bytes sent on wire,
3159 * we add to pkt_len the headers size of all segments
3161 if (shinfo
->gso_size
) {
3162 unsigned int hdr_len
;
3163 u16 gso_segs
= shinfo
->gso_segs
;
3165 /* mac layer + network layer */
3166 hdr_len
= skb_transport_header(skb
) - skb_mac_header(skb
);
3168 /* + transport layer */
3169 if (likely(shinfo
->gso_type
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))) {
3170 const struct tcphdr
*th
;
3171 struct tcphdr _tcphdr
;
3173 th
= skb_header_pointer(skb
, skb_transport_offset(skb
),
3174 sizeof(_tcphdr
), &_tcphdr
);
3176 hdr_len
+= __tcp_hdrlen(th
);
3178 struct udphdr _udphdr
;
3180 if (skb_header_pointer(skb
, skb_transport_offset(skb
),
3181 sizeof(_udphdr
), &_udphdr
))
3182 hdr_len
+= sizeof(struct udphdr
);
3185 if (shinfo
->gso_type
& SKB_GSO_DODGY
)
3186 gso_segs
= DIV_ROUND_UP(skb
->len
- hdr_len
,
3189 qdisc_skb_cb(skb
)->pkt_len
+= (gso_segs
- 1) * hdr_len
;
3193 static inline int __dev_xmit_skb(struct sk_buff
*skb
, struct Qdisc
*q
,
3194 struct net_device
*dev
,
3195 struct netdev_queue
*txq
)
3197 spinlock_t
*root_lock
= qdisc_lock(q
);
3198 struct sk_buff
*to_free
= NULL
;
3202 qdisc_calculate_pkt_len(skb
, q
);
3204 if (q
->flags
& TCQ_F_NOLOCK
) {
3205 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
))) {
3206 __qdisc_drop(skb
, &to_free
);
3209 rc
= q
->enqueue(skb
, q
, &to_free
) & NET_XMIT_MASK
;
3213 if (unlikely(to_free
))
3214 kfree_skb_list(to_free
);
3219 * Heuristic to force contended enqueues to serialize on a
3220 * separate lock before trying to get qdisc main lock.
3221 * This permits qdisc->running owner to get the lock more
3222 * often and dequeue packets faster.
3224 contended
= qdisc_is_running(q
);
3225 if (unlikely(contended
))
3226 spin_lock(&q
->busylock
);
3228 spin_lock(root_lock
);
3229 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
))) {
3230 __qdisc_drop(skb
, &to_free
);
3232 } else if ((q
->flags
& TCQ_F_CAN_BYPASS
) && !qdisc_qlen(q
) &&
3233 qdisc_run_begin(q
)) {
3235 * This is a work-conserving queue; there are no old skbs
3236 * waiting to be sent out; and the qdisc is not running -
3237 * xmit the skb directly.
3240 qdisc_bstats_update(q
, skb
);
3242 if (sch_direct_xmit(skb
, q
, dev
, txq
, root_lock
, true)) {
3243 if (unlikely(contended
)) {
3244 spin_unlock(&q
->busylock
);
3251 rc
= NET_XMIT_SUCCESS
;
3253 rc
= q
->enqueue(skb
, q
, &to_free
) & NET_XMIT_MASK
;
3254 if (qdisc_run_begin(q
)) {
3255 if (unlikely(contended
)) {
3256 spin_unlock(&q
->busylock
);
3263 spin_unlock(root_lock
);
3264 if (unlikely(to_free
))
3265 kfree_skb_list(to_free
);
3266 if (unlikely(contended
))
3267 spin_unlock(&q
->busylock
);
3271 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3272 static void skb_update_prio(struct sk_buff
*skb
)
3274 struct netprio_map
*map
= rcu_dereference_bh(skb
->dev
->priomap
);
3276 if (!skb
->priority
&& skb
->sk
&& map
) {
3277 unsigned int prioidx
=
3278 sock_cgroup_prioidx(&skb
->sk
->sk_cgrp_data
);
3280 if (prioidx
< map
->priomap_len
)
3281 skb
->priority
= map
->priomap
[prioidx
];
3285 #define skb_update_prio(skb)
3288 DEFINE_PER_CPU(int, xmit_recursion
);
3289 EXPORT_SYMBOL(xmit_recursion
);
3292 * dev_loopback_xmit - loop back @skb
3293 * @net: network namespace this loopback is happening in
3294 * @sk: sk needed to be a netfilter okfn
3295 * @skb: buffer to transmit
3297 int dev_loopback_xmit(struct net
*net
, struct sock
*sk
, struct sk_buff
*skb
)
3299 skb_reset_mac_header(skb
);
3300 __skb_pull(skb
, skb_network_offset(skb
));
3301 skb
->pkt_type
= PACKET_LOOPBACK
;
3302 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3303 WARN_ON(!skb_dst(skb
));
3308 EXPORT_SYMBOL(dev_loopback_xmit
);
3310 #ifdef CONFIG_NET_EGRESS
3311 static struct sk_buff
*
3312 sch_handle_egress(struct sk_buff
*skb
, int *ret
, struct net_device
*dev
)
3314 struct mini_Qdisc
*miniq
= rcu_dereference_bh(dev
->miniq_egress
);
3315 struct tcf_result cl_res
;
3320 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3321 mini_qdisc_bstats_cpu_update(miniq
, skb
);
3323 switch (tcf_classify(skb
, miniq
->filter_list
, &cl_res
, false)) {
3325 case TC_ACT_RECLASSIFY
:
3326 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
3329 mini_qdisc_qstats_cpu_drop(miniq
);
3330 *ret
= NET_XMIT_DROP
;
3336 *ret
= NET_XMIT_SUCCESS
;
3339 case TC_ACT_REDIRECT
:
3340 /* No need to push/pop skb's mac_header here on egress! */
3341 skb_do_redirect(skb
);
3342 *ret
= NET_XMIT_SUCCESS
;
3350 #endif /* CONFIG_NET_EGRESS */
3352 static inline int get_xps_queue(struct net_device
*dev
, struct sk_buff
*skb
)
3355 struct xps_dev_maps
*dev_maps
;
3356 struct xps_map
*map
;
3357 int queue_index
= -1;
3360 dev_maps
= rcu_dereference(dev
->xps_maps
);
3362 unsigned int tci
= skb
->sender_cpu
- 1;
3366 tci
+= netdev_get_prio_tc_map(dev
, skb
->priority
);
3369 map
= rcu_dereference(dev_maps
->cpu_map
[tci
]);
3372 queue_index
= map
->queues
[0];
3374 queue_index
= map
->queues
[reciprocal_scale(skb_get_hash(skb
),
3376 if (unlikely(queue_index
>= dev
->real_num_tx_queues
))
3388 static u16
__netdev_pick_tx(struct net_device
*dev
, struct sk_buff
*skb
)
3390 struct sock
*sk
= skb
->sk
;
3391 int queue_index
= sk_tx_queue_get(sk
);
3393 if (queue_index
< 0 || skb
->ooo_okay
||
3394 queue_index
>= dev
->real_num_tx_queues
) {
3395 int new_index
= get_xps_queue(dev
, skb
);
3398 new_index
= skb_tx_hash(dev
, skb
);
3400 if (queue_index
!= new_index
&& sk
&&
3402 rcu_access_pointer(sk
->sk_dst_cache
))
3403 sk_tx_queue_set(sk
, new_index
);
3405 queue_index
= new_index
;
3411 struct netdev_queue
*netdev_pick_tx(struct net_device
*dev
,
3412 struct sk_buff
*skb
,
3415 int queue_index
= 0;
3418 u32 sender_cpu
= skb
->sender_cpu
- 1;
3420 if (sender_cpu
>= (u32
)NR_CPUS
)
3421 skb
->sender_cpu
= raw_smp_processor_id() + 1;
3424 if (dev
->real_num_tx_queues
!= 1) {
3425 const struct net_device_ops
*ops
= dev
->netdev_ops
;
3427 if (ops
->ndo_select_queue
)
3428 queue_index
= ops
->ndo_select_queue(dev
, skb
, accel_priv
,
3431 queue_index
= __netdev_pick_tx(dev
, skb
);
3433 queue_index
= netdev_cap_txqueue(dev
, queue_index
);
3436 skb_set_queue_mapping(skb
, queue_index
);
3437 return netdev_get_tx_queue(dev
, queue_index
);
3441 * __dev_queue_xmit - transmit a buffer
3442 * @skb: buffer to transmit
3443 * @accel_priv: private data used for L2 forwarding offload
3445 * Queue a buffer for transmission to a network device. The caller must
3446 * have set the device and priority and built the buffer before calling
3447 * this function. The function can be called from an interrupt.
3449 * A negative errno code is returned on a failure. A success does not
3450 * guarantee the frame will be transmitted as it may be dropped due
3451 * to congestion or traffic shaping.
3453 * -----------------------------------------------------------------------------------
3454 * I notice this method can also return errors from the queue disciplines,
3455 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3458 * Regardless of the return value, the skb is consumed, so it is currently
3459 * difficult to retry a send to this method. (You can bump the ref count
3460 * before sending to hold a reference for retry if you are careful.)
3462 * When calling this method, interrupts MUST be enabled. This is because
3463 * the BH enable code must have IRQs enabled so that it will not deadlock.
3466 static int __dev_queue_xmit(struct sk_buff
*skb
, void *accel_priv
)
3468 struct net_device
*dev
= skb
->dev
;
3469 struct netdev_queue
*txq
;
3474 skb_reset_mac_header(skb
);
3476 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_SCHED_TSTAMP
))
3477 __skb_tstamp_tx(skb
, NULL
, skb
->sk
, SCM_TSTAMP_SCHED
);
3479 /* Disable soft irqs for various locks below. Also
3480 * stops preemption for RCU.
3484 skb_update_prio(skb
);
3486 qdisc_pkt_len_init(skb
);
3487 #ifdef CONFIG_NET_CLS_ACT
3488 skb
->tc_at_ingress
= 0;
3489 # ifdef CONFIG_NET_EGRESS
3490 if (static_key_false(&egress_needed
)) {
3491 skb
= sch_handle_egress(skb
, &rc
, dev
);
3497 /* If device/qdisc don't need skb->dst, release it right now while
3498 * its hot in this cpu cache.
3500 if (dev
->priv_flags
& IFF_XMIT_DST_RELEASE
)
3505 txq
= netdev_pick_tx(dev
, skb
, accel_priv
);
3506 q
= rcu_dereference_bh(txq
->qdisc
);
3508 trace_net_dev_queue(skb
);
3510 rc
= __dev_xmit_skb(skb
, q
, dev
, txq
);
3514 /* The device has no queue. Common case for software devices:
3515 * loopback, all the sorts of tunnels...
3517 * Really, it is unlikely that netif_tx_lock protection is necessary
3518 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
3520 * However, it is possible, that they rely on protection
3523 * Check this and shot the lock. It is not prone from deadlocks.
3524 *Either shot noqueue qdisc, it is even simpler 8)
3526 if (dev
->flags
& IFF_UP
) {
3527 int cpu
= smp_processor_id(); /* ok because BHs are off */
3529 if (txq
->xmit_lock_owner
!= cpu
) {
3530 if (unlikely(__this_cpu_read(xmit_recursion
) >
3531 XMIT_RECURSION_LIMIT
))
3532 goto recursion_alert
;
3534 skb
= validate_xmit_skb(skb
, dev
, &again
);
3538 HARD_TX_LOCK(dev
, txq
, cpu
);
3540 if (!netif_xmit_stopped(txq
)) {
3541 __this_cpu_inc(xmit_recursion
);
3542 skb
= dev_hard_start_xmit(skb
, dev
, txq
, &rc
);
3543 __this_cpu_dec(xmit_recursion
);
3544 if (dev_xmit_complete(rc
)) {
3545 HARD_TX_UNLOCK(dev
, txq
);
3549 HARD_TX_UNLOCK(dev
, txq
);
3550 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3553 /* Recursion is detected! It is possible,
3557 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3563 rcu_read_unlock_bh();
3565 atomic_long_inc(&dev
->tx_dropped
);
3566 kfree_skb_list(skb
);
3569 rcu_read_unlock_bh();
int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
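
/* Example (illustrative sketch, compiled out): how a tunnel or protocol
 * layer hands a fully built skb to the transmit path. dev_queue_xmit()
 * consumes the skb regardless of the return value, so the caller must not
 * touch it afterwards. The example_send name is hypothetical.
 */
#if 0
static int example_send(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	skb->priority = 0;
	return dev_queue_xmit(skb);	/* may return NET_XMIT_* codes */
}
#endif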
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
3612 /* One global table that all flow-based protocols share. */
3613 struct rps_sock_flow_table __rcu
*rps_sock_flow_table __read_mostly
;
3614 EXPORT_SYMBOL(rps_sock_flow_table
);
3615 u32 rps_cpu_mask __read_mostly
;
3616 EXPORT_SYMBOL(rps_cpu_mask
);
3618 struct static_key rps_needed __read_mostly
;
3619 EXPORT_SYMBOL(rps_needed
);
3620 struct static_key rfs_needed __read_mostly
;
3621 EXPORT_SYMBOL(rfs_needed
);
3623 static struct rps_dev_flow
*
3624 set_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
3625 struct rps_dev_flow
*rflow
, u16 next_cpu
)
3627 if (next_cpu
< nr_cpu_ids
) {
3628 #ifdef CONFIG_RFS_ACCEL
3629 struct netdev_rx_queue
*rxqueue
;
3630 struct rps_dev_flow_table
*flow_table
;
3631 struct rps_dev_flow
*old_rflow
;
3636 /* Should we steer this flow to a different hardware queue? */
3637 if (!skb_rx_queue_recorded(skb
) || !dev
->rx_cpu_rmap
||
3638 !(dev
->features
& NETIF_F_NTUPLE
))
3640 rxq_index
= cpu_rmap_lookup_index(dev
->rx_cpu_rmap
, next_cpu
);
3641 if (rxq_index
== skb_get_rx_queue(skb
))
3644 rxqueue
= dev
->_rx
+ rxq_index
;
3645 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
3648 flow_id
= skb_get_hash(skb
) & flow_table
->mask
;
3649 rc
= dev
->netdev_ops
->ndo_rx_flow_steer(dev
, skb
,
3650 rxq_index
, flow_id
);
3654 rflow
= &flow_table
->flows
[flow_id
];
3656 if (old_rflow
->filter
== rflow
->filter
)
3657 old_rflow
->filter
= RPS_NO_FILTER
;
3661 per_cpu(softnet_data
, next_cpu
).input_queue_head
;
3664 rflow
->cpu
= next_cpu
;
3669 * get_rps_cpu is called from netif_receive_skb and returns the target
3670 * CPU from the RPS map of the receiving queue for a given skb.
3671 * rcu_read_lock must be held on entry.
3673 static int get_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
3674 struct rps_dev_flow
**rflowp
)
3676 const struct rps_sock_flow_table
*sock_flow_table
;
3677 struct netdev_rx_queue
*rxqueue
= dev
->_rx
;
3678 struct rps_dev_flow_table
*flow_table
;
3679 struct rps_map
*map
;
3684 if (skb_rx_queue_recorded(skb
)) {
3685 u16 index
= skb_get_rx_queue(skb
);
3687 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
3688 WARN_ONCE(dev
->real_num_rx_queues
> 1,
3689 "%s received packet on queue %u, but number "
3690 "of RX queues is %u\n",
3691 dev
->name
, index
, dev
->real_num_rx_queues
);
3697 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3699 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
3700 map
= rcu_dereference(rxqueue
->rps_map
);
3701 if (!flow_table
&& !map
)
3704 skb_reset_network_header(skb
);
3705 hash
= skb_get_hash(skb
);
3709 sock_flow_table
= rcu_dereference(rps_sock_flow_table
);
3710 if (flow_table
&& sock_flow_table
) {
3711 struct rps_dev_flow
*rflow
;
3715 /* First check into global flow table if there is a match */
3716 ident
= sock_flow_table
->ents
[hash
& sock_flow_table
->mask
];
3717 if ((ident
^ hash
) & ~rps_cpu_mask
)
3720 next_cpu
= ident
& rps_cpu_mask
;
3722 /* OK, now we know there is a match,
3723 * we can look at the local (per receive queue) flow table
3725 rflow
= &flow_table
->flows
[hash
& flow_table
->mask
];
3729 * If the desired CPU (where last recvmsg was done) is
3730 * different from current CPU (one in the rx-queue flow
3731 * table entry), switch if one of the following holds:
3732 * - Current CPU is unset (>= nr_cpu_ids).
3733 * - Current CPU is offline.
3734 * - The current CPU's queue tail has advanced beyond the
3735 * last packet that was enqueued using this table entry.
3736 * This guarantees that all previous packets for the flow
3737 * have been dequeued, thus preserving in order delivery.
3739 if (unlikely(tcpu
!= next_cpu
) &&
3740 (tcpu
>= nr_cpu_ids
|| !cpu_online(tcpu
) ||
3741 ((int)(per_cpu(softnet_data
, tcpu
).input_queue_head
-
3742 rflow
->last_qtail
)) >= 0)) {
3744 rflow
= set_rps_cpu(dev
, skb
, rflow
, next_cpu
);
3747 if (tcpu
< nr_cpu_ids
&& cpu_online(tcpu
)) {
3757 tcpu
= map
->cpus
[reciprocal_scale(hash
, map
->len
)];
3758 if (cpu_online(tcpu
)) {
3768 #ifdef CONFIG_RFS_ACCEL
3771 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3772 * @dev: Device on which the filter was set
3773 * @rxq_index: RX queue index
3774 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3775 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3777 * Drivers that implement ndo_rx_flow_steer() should periodically call
3778 * this function for each installed filter and remove the filters for
3779 * which it returns %true.
3781 bool rps_may_expire_flow(struct net_device
*dev
, u16 rxq_index
,
3782 u32 flow_id
, u16 filter_id
)
3784 struct netdev_rx_queue
*rxqueue
= dev
->_rx
+ rxq_index
;
3785 struct rps_dev_flow_table
*flow_table
;
3786 struct rps_dev_flow
*rflow
;
3791 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
3792 if (flow_table
&& flow_id
<= flow_table
->mask
) {
3793 rflow
= &flow_table
->flows
[flow_id
];
3794 cpu
= READ_ONCE(rflow
->cpu
);
3795 if (rflow
->filter
== filter_id
&& cpu
< nr_cpu_ids
&&
3796 ((int)(per_cpu(softnet_data
, cpu
).input_queue_head
-
3797 rflow
->last_qtail
) <
3798 (int)(10 * flow_table
->mask
)))
3804 EXPORT_SYMBOL(rps_may_expire_flow
);
3806 #endif /* CONFIG_RFS_ACCEL */
3808 /* Called from hardirq (IPI) context */
3809 static void rps_trigger_softirq(void *data
)
3811 struct softnet_data
*sd
= data
;
3813 ____napi_schedule(sd
, &sd
->backlog
);
3817 #endif /* CONFIG_RPS */
3820 * Check if this softnet_data structure is another cpu one
3821 * If yes, queue it to our IPI list and return 1
3824 static int rps_ipi_queued(struct softnet_data
*sd
)
3827 struct softnet_data
*mysd
= this_cpu_ptr(&softnet_data
);
3830 sd
->rps_ipi_next
= mysd
->rps_ipi_list
;
3831 mysd
->rps_ipi_list
= sd
;
3833 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
3836 #endif /* CONFIG_RPS */
3840 #ifdef CONFIG_NET_FLOW_LIMIT
3841 int netdev_flow_limit_table_len __read_mostly
= (1 << 12);
3844 static bool skb_flow_limit(struct sk_buff
*skb
, unsigned int qlen
)
3846 #ifdef CONFIG_NET_FLOW_LIMIT
3847 struct sd_flow_limit
*fl
;
3848 struct softnet_data
*sd
;
3849 unsigned int old_flow
, new_flow
;
3851 if (qlen
< (netdev_max_backlog
>> 1))
3854 sd
= this_cpu_ptr(&softnet_data
);
3857 fl
= rcu_dereference(sd
->flow_limit
);
3859 new_flow
= skb_get_hash(skb
) & (fl
->num_buckets
- 1);
3860 old_flow
= fl
->history
[fl
->history_head
];
3861 fl
->history
[fl
->history_head
] = new_flow
;
3864 fl
->history_head
&= FLOW_LIMIT_HISTORY
- 1;
3866 if (likely(fl
->buckets
[old_flow
]))
3867 fl
->buckets
[old_flow
]--;
3869 if (++fl
->buckets
[new_flow
] > (FLOW_LIMIT_HISTORY
>> 1)) {
3881 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3882 * queue (may be a remote CPU queue).
3884 static int enqueue_to_backlog(struct sk_buff
*skb
, int cpu
,
3885 unsigned int *qtail
)
3887 struct softnet_data
*sd
;
3888 unsigned long flags
;
3891 sd
= &per_cpu(softnet_data
, cpu
);
3893 local_irq_save(flags
);
3896 if (!netif_running(skb
->dev
))
3898 qlen
= skb_queue_len(&sd
->input_pkt_queue
);
3899 if (qlen
<= netdev_max_backlog
&& !skb_flow_limit(skb
, qlen
)) {
3902 __skb_queue_tail(&sd
->input_pkt_queue
, skb
);
3903 input_queue_tail_incr_save(sd
, qtail
);
3905 local_irq_restore(flags
);
3906 return NET_RX_SUCCESS
;
3909 /* Schedule NAPI for backlog device
3910 * We can use non atomic operation since we own the queue lock
3912 if (!__test_and_set_bit(NAPI_STATE_SCHED
, &sd
->backlog
.state
)) {
3913 if (!rps_ipi_queued(sd
))
3914 ____napi_schedule(sd
, &sd
->backlog
);
3923 local_irq_restore(flags
);
3925 atomic_long_inc(&skb
->dev
->rx_dropped
);
3930 static struct netdev_rx_queue
*netif_get_rxqueue(struct sk_buff
*skb
)
3932 struct net_device
*dev
= skb
->dev
;
3933 struct netdev_rx_queue
*rxqueue
;
3937 if (skb_rx_queue_recorded(skb
)) {
3938 u16 index
= skb_get_rx_queue(skb
);
3940 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
3941 WARN_ONCE(dev
->real_num_rx_queues
> 1,
3942 "%s received packet on queue %u, but number "
3943 "of RX queues is %u\n",
3944 dev
->name
, index
, dev
->real_num_rx_queues
);
3946 return rxqueue
; /* Return first rxqueue */
3953 static u32
netif_receive_generic_xdp(struct sk_buff
*skb
,
3954 struct bpf_prog
*xdp_prog
)
3956 struct netdev_rx_queue
*rxqueue
;
3957 u32 metalen
, act
= XDP_DROP
;
3958 struct xdp_buff xdp
;
3963 /* Reinjected packets coming from act_mirred or similar should
3964 * not get XDP generic processing.
3966 if (skb_cloned(skb
))
3969 /* XDP packets must be linear and must have sufficient headroom
3970 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
3971 * native XDP provides, thus we need to do it here as well.
3973 if (skb_is_nonlinear(skb
) ||
3974 skb_headroom(skb
) < XDP_PACKET_HEADROOM
) {
3975 int hroom
= XDP_PACKET_HEADROOM
- skb_headroom(skb
);
3976 int troom
= skb
->tail
+ skb
->data_len
- skb
->end
;
3978 /* In case we have to go down the path and also linearize,
3979 * then lets do the pskb_expand_head() work just once here.
3981 if (pskb_expand_head(skb
,
3982 hroom
> 0 ? ALIGN(hroom
, NET_SKB_PAD
) : 0,
3983 troom
> 0 ? troom
+ 128 : 0, GFP_ATOMIC
))
3985 if (skb_linearize(skb
))
3989 /* The XDP program wants to see the packet starting at the MAC
3992 mac_len
= skb
->data
- skb_mac_header(skb
);
3993 hlen
= skb_headlen(skb
) + mac_len
;
3994 xdp
.data
= skb
->data
- mac_len
;
3995 xdp
.data_meta
= xdp
.data
;
3996 xdp
.data_end
= xdp
.data
+ hlen
;
3997 xdp
.data_hard_start
= skb
->data
- skb_headroom(skb
);
3998 orig_data
= xdp
.data
;
4000 rxqueue
= netif_get_rxqueue(skb
);
4001 xdp
.rxq
= &rxqueue
->xdp_rxq
;
4003 act
= bpf_prog_run_xdp(xdp_prog
, &xdp
);
4005 off
= xdp
.data
- orig_data
;
4007 __skb_pull(skb
, off
);
4009 __skb_push(skb
, -off
);
4010 skb
->mac_header
+= off
;
4015 __skb_push(skb
, mac_len
);
4018 metalen
= xdp
.data
- xdp
.data_meta
;
4020 skb_metadata_set(skb
, metalen
);
4023 bpf_warn_invalid_xdp_action(act
);
4026 trace_xdp_exception(skb
->dev
, xdp_prog
, act
);
4037 /* When doing generic XDP we have to bypass the qdisc layer and the
4038 * network taps in order to match in-driver-XDP behavior.
4040 void generic_xdp_tx(struct sk_buff
*skb
, struct bpf_prog
*xdp_prog
)
4042 struct net_device
*dev
= skb
->dev
;
4043 struct netdev_queue
*txq
;
4044 bool free_skb
= true;
4047 txq
= netdev_pick_tx(dev
, skb
, NULL
);
4048 cpu
= smp_processor_id();
4049 HARD_TX_LOCK(dev
, txq
, cpu
);
4050 if (!netif_xmit_stopped(txq
)) {
4051 rc
= netdev_start_xmit(skb
, dev
, txq
, 0);
4052 if (dev_xmit_complete(rc
))
4055 HARD_TX_UNLOCK(dev
, txq
);
4057 trace_xdp_exception(dev
, xdp_prog
, XDP_TX
);
4061 EXPORT_SYMBOL_GPL(generic_xdp_tx
);
4063 static struct static_key generic_xdp_needed __read_mostly
;
4065 int do_xdp_generic(struct bpf_prog
*xdp_prog
, struct sk_buff
*skb
)
4068 u32 act
= netif_receive_generic_xdp(skb
, xdp_prog
);
4071 if (act
!= XDP_PASS
) {
4074 err
= xdp_do_generic_redirect(skb
->dev
, skb
,
4078 /* fallthru to submit skb */
4080 generic_xdp_tx(skb
, xdp_prog
);
4091 EXPORT_SYMBOL_GPL(do_xdp_generic
);
4093 static int netif_rx_internal(struct sk_buff
*skb
)
4097 net_timestamp_check(netdev_tstamp_prequeue
, skb
);
4099 trace_netif_rx(skb
);
4101 if (static_key_false(&generic_xdp_needed
)) {
4106 ret
= do_xdp_generic(rcu_dereference(skb
->dev
->xdp_prog
), skb
);
4110 /* Consider XDP consuming the packet a success from
4111 * the netdev point of view we do not want to count
4114 if (ret
!= XDP_PASS
)
4115 return NET_RX_SUCCESS
;
4119 if (static_key_false(&rps_needed
)) {
4120 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
4126 cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
4128 cpu
= smp_processor_id();
4130 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
4139 ret
= enqueue_to_backlog(skb
, get_cpu(), &qtail
);
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
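
/* Example (illustrative sketch, compiled out): non-NAPI drivers call
 * netif_rx() from their interrupt handler, while code running in process
 * context (for example a workqueue-based receive path) uses netif_rx_ni()
 * so the pending NET_RX softirq is run before returning. The foo_* names
 * are hypothetical.
 */
#if 0
static irqreturn_t foo_interrupt(int irq, void *data)
{
	struct sk_buff *skb = foo_build_rx_skb(data);

	if (skb)
		netif_rx(skb);
	return IRQ_HANDLED;
}

static void foo_rx_work(struct work_struct *work)
{
	struct sk_buff *skb = foo_build_rx_skb(work);

	if (skb)
		netif_rx_ni(skb);
}
#endif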
4184 static __latent_entropy
void net_tx_action(struct softirq_action
*h
)
4186 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
4188 if (sd
->completion_queue
) {
4189 struct sk_buff
*clist
;
4191 local_irq_disable();
4192 clist
= sd
->completion_queue
;
4193 sd
->completion_queue
= NULL
;
4197 struct sk_buff
*skb
= clist
;
4199 clist
= clist
->next
;
4201 WARN_ON(refcount_read(&skb
->users
));
4202 if (likely(get_kfree_skb_cb(skb
)->reason
== SKB_REASON_CONSUMED
))
4203 trace_consume_skb(skb
);
4205 trace_kfree_skb(skb
, net_tx_action
);
4207 if (skb
->fclone
!= SKB_FCLONE_UNAVAILABLE
)
4210 __kfree_skb_defer(skb
);
4213 __kfree_skb_flush();
4216 if (sd
->output_queue
) {
4219 local_irq_disable();
4220 head
= sd
->output_queue
;
4221 sd
->output_queue
= NULL
;
4222 sd
->output_queue_tailp
= &sd
->output_queue
;
4226 struct Qdisc
*q
= head
;
4227 spinlock_t
*root_lock
= NULL
;
4229 head
= head
->next_sched
;
4231 if (!(q
->flags
& TCQ_F_NOLOCK
)) {
4232 root_lock
= qdisc_lock(q
);
4233 spin_lock(root_lock
);
4235 /* We need to make sure head->next_sched is read
4236 * before clearing __QDISC_STATE_SCHED
4238 smp_mb__before_atomic();
4239 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
4242 spin_unlock(root_lock
);
4246 xfrm_dev_backlog(sd
);
4249 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4250 /* This hook is defined here for ATM LANE */
4251 int (*br_fdb_test_addr_hook
)(struct net_device
*dev
,
4252 unsigned char *addr
) __read_mostly
;
4253 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook
);
4256 static inline struct sk_buff
*
4257 sch_handle_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
, int *ret
,
4258 struct net_device
*orig_dev
)
4260 #ifdef CONFIG_NET_CLS_ACT
4261 struct mini_Qdisc
*miniq
= rcu_dereference_bh(skb
->dev
->miniq_ingress
);
4262 struct tcf_result cl_res
;
4264 /* If there's at least one ingress present somewhere (so
4265 * we get here via enabled static key), remaining devices
4266 * that are not configured with an ingress qdisc will bail
4273 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
4277 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
4278 skb
->tc_at_ingress
= 1;
4279 mini_qdisc_bstats_cpu_update(miniq
, skb
);
4281 switch (tcf_classify(skb
, miniq
->filter_list
, &cl_res
, false)) {
4283 case TC_ACT_RECLASSIFY
:
4284 skb
->tc_index
= TC_H_MIN(cl_res
.classid
);
4287 mini_qdisc_qstats_cpu_drop(miniq
);
4295 case TC_ACT_REDIRECT
:
4296 /* skb_mac_header check was done by cls/act_bpf, so
4297 * we can safely push the L2 header back before
4298 * redirecting to another netdev
4300 __skb_push(skb
, skb
->mac_len
);
4301 skb_do_redirect(skb
);
4306 #endif /* CONFIG_NET_CLS_ACT */
/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
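
/* Example (illustrative sketch, compiled out): how a virtual device such as
 * a bridge- or bonding-style driver would attach an rx_handler to a slave
 * device under RTNL and pick up its per-port data in the handler. The foo_*
 * names are hypothetical.
 */
#if 0
static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);

	/* ... steer the frame to the upper device using "port" ... */
	return RX_HANDLER_PASS;
}

static int foo_enslave(struct net_device *upper, struct net_device *slave,
		       struct foo_port *port)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(slave, foo_handle_frame, port);
}
#endif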
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
4395 static inline int nf_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
,
4396 int *ret
, struct net_device
*orig_dev
)
4398 #ifdef CONFIG_NETFILTER_INGRESS
4399 if (nf_hook_ingress_active(skb
)) {
4403 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
4408 ingress_retval
= nf_hook_ingress(skb
);
4410 return ingress_retval
;
4412 #endif /* CONFIG_NETFILTER_INGRESS */
4416 static int __netif_receive_skb_core(struct sk_buff
*skb
, bool pfmemalloc
)
4418 struct packet_type
*ptype
, *pt_prev
;
4419 rx_handler_func_t
*rx_handler
;
4420 struct net_device
*orig_dev
;
4421 bool deliver_exact
= false;
4422 int ret
= NET_RX_DROP
;
4425 net_timestamp_check(!netdev_tstamp_prequeue
, skb
);
4427 trace_netif_receive_skb(skb
);
4429 orig_dev
= skb
->dev
;
4431 skb_reset_network_header(skb
);
4432 if (!skb_transport_header_was_set(skb
))
4433 skb_reset_transport_header(skb
);
4434 skb_reset_mac_len(skb
);
4439 skb
->skb_iif
= skb
->dev
->ifindex
;
4441 __this_cpu_inc(softnet_data
.processed
);
4443 if (skb
->protocol
== cpu_to_be16(ETH_P_8021Q
) ||
4444 skb
->protocol
== cpu_to_be16(ETH_P_8021AD
)) {
4445 skb
= skb_vlan_untag(skb
);
4450 if (skb_skip_tc_classify(skb
))
4456 list_for_each_entry_rcu(ptype
, &ptype_all
, list
) {
4458 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4462 list_for_each_entry_rcu(ptype
, &skb
->dev
->ptype_all
, list
) {
4464 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4469 #ifdef CONFIG_NET_INGRESS
4470 if (static_key_false(&ingress_needed
)) {
4471 skb
= sch_handle_ingress(skb
, &pt_prev
, &ret
, orig_dev
);
4475 if (nf_ingress(skb
, &pt_prev
, &ret
, orig_dev
) < 0)
4481 if (pfmemalloc
&& !skb_pfmemalloc_protocol(skb
))
4484 if (skb_vlan_tag_present(skb
)) {
4486 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4489 if (vlan_do_receive(&skb
))
4491 else if (unlikely(!skb
))
4495 rx_handler
= rcu_dereference(skb
->dev
->rx_handler
);
4498 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
4501 switch (rx_handler(&skb
)) {
4502 case RX_HANDLER_CONSUMED
:
4503 ret
= NET_RX_SUCCESS
;
4505 case RX_HANDLER_ANOTHER
:
4507 case RX_HANDLER_EXACT
:
4508 deliver_exact
= true;
4509 case RX_HANDLER_PASS
:
4516 if (unlikely(skb_vlan_tag_present(skb
))) {
4517 if (skb_vlan_tag_get_id(skb
))
4518 skb
->pkt_type
= PACKET_OTHERHOST
;
4519 /* Note: we might in the future use prio bits
4520 * and set skb->priority like in vlan_do_receive()
4521 * For the time being, just ignore Priority Code Point
4526 type
= skb
->protocol
;
4528 /* deliver only exact match when indicated */
4529 if (likely(!deliver_exact
)) {
4530 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
4531 &ptype_base
[ntohs(type
) &
4535 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
4536 &orig_dev
->ptype_specific
);
4538 if (unlikely(skb
->dev
!= orig_dev
)) {
4539 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
4540 &skb
->dev
->ptype_specific
);
4544 if (unlikely(skb_orphan_frags_rx(skb
, GFP_ATOMIC
)))
4547 ret
= pt_prev
->func(skb
, skb
->dev
, pt_prev
, orig_dev
);
4551 atomic_long_inc(&skb
->dev
->rx_dropped
);
4553 atomic_long_inc(&skb
->dev
->rx_nohandler
);
4555 /* Jamal, now you will not able to escape explaining
4556 * me how you were going to use this. :-)
4566 * netif_receive_skb_core - special purpose version of netif_receive_skb
4567 * @skb: buffer to process
4569 * More direct receive version of netif_receive_skb(). It should
4570 * only be used by callers that have a need to skip RPS and Generic XDP.
4571 * Caller must also take care of handling if (page_is_)pfmemalloc.
4573 * This function may only be called from softirq context and interrupts
4574 * should be enabled.
4576 * Return values (usually ignored):
4577 * NET_RX_SUCCESS: no congestion
4578 * NET_RX_DROP: packet was dropped
4580 int netif_receive_skb_core(struct sk_buff
*skb
)
4585 ret
= __netif_receive_skb_core(skb
, false);
4590 EXPORT_SYMBOL(netif_receive_skb_core
);
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}
4617 static int generic_xdp_install(struct net_device
*dev
, struct netdev_bpf
*xdp
)
4619 struct bpf_prog
*old
= rtnl_dereference(dev
->xdp_prog
);
4620 struct bpf_prog
*new = xdp
->prog
;
4623 switch (xdp
->command
) {
4624 case XDP_SETUP_PROG
:
4625 rcu_assign_pointer(dev
->xdp_prog
, new);
4630 static_key_slow_dec(&generic_xdp_needed
);
4631 } else if (new && !old
) {
4632 static_key_slow_inc(&generic_xdp_needed
);
4633 dev_disable_lro(dev
);
4634 dev_disable_gro_hw(dev
);
4638 case XDP_QUERY_PROG
:
4639 xdp
->prog_attached
= !!old
;
4640 xdp
->prog_id
= old
? old
->aux
->id
: 0;
4651 static int netif_receive_skb_internal(struct sk_buff
*skb
)
4655 net_timestamp_check(netdev_tstamp_prequeue
, skb
);
4657 if (skb_defer_rx_timestamp(skb
))
4658 return NET_RX_SUCCESS
;
4660 if (static_key_false(&generic_xdp_needed
)) {
4665 ret
= do_xdp_generic(rcu_dereference(skb
->dev
->xdp_prog
), skb
);
4669 if (ret
!= XDP_PASS
)
4675 if (static_key_false(&rps_needed
)) {
4676 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
4677 int cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
4680 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
4686 ret
= __netif_receive_skb(skb
);
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
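
/* Example (illustrative sketch, compiled out): a NAPI driver's poll routine
 * normally hands frames up with napi_gro_receive(), which falls back to the
 * netif_receive_skb() path; drivers without GRO can call
 * netif_receive_skb() directly from softirq context. The foo_* names are
 * hypothetical.
 */
#if 0
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = foo_next_rx_skb(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
		done++;
	}

	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}
#endif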
4714 DEFINE_PER_CPU(struct work_struct
, flush_works
);
4716 /* Network device is going away, flush any packets still pending */
4717 static void flush_backlog(struct work_struct
*work
)
4719 struct sk_buff
*skb
, *tmp
;
4720 struct softnet_data
*sd
;
4723 sd
= this_cpu_ptr(&softnet_data
);
4725 local_irq_disable();
4727 skb_queue_walk_safe(&sd
->input_pkt_queue
, skb
, tmp
) {
4728 if (skb
->dev
->reg_state
== NETREG_UNREGISTERING
) {
4729 __skb_unlink(skb
, &sd
->input_pkt_queue
);
4731 input_queue_head_incr(sd
);
4737 skb_queue_walk_safe(&sd
->process_queue
, skb
, tmp
) {
4738 if (skb
->dev
->reg_state
== NETREG_UNREGISTERING
) {
4739 __skb_unlink(skb
, &sd
->process_queue
);
4741 input_queue_head_incr(sd
);
4747 static void flush_all_backlogs(void)
4753 for_each_online_cpu(cpu
)
4754 queue_work_on(cpu
, system_highpri_wq
,
4755 per_cpu_ptr(&flush_works
, cpu
));
4757 for_each_online_cpu(cpu
)
4758 flush_work(per_cpu_ptr(&flush_works
, cpu
));
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}
/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
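
/* Illustrative sketch (editor's addition, not from this file): a typical
 * NAPI driver passes each received buffer through GRO from its ->poll()
 * callback. The "foo_" names are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = foo_fetch_rx_skb(napi);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		return work;	// completion handled as shown further below
 *	}
 */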
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	secpath_reset(skb);

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}
/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
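
/* Illustrative sketch (editor's addition, not from this file): drivers that
 * receive into pages rather than into a linear buffer use the frag API.
 * napi_get_frags() hands back a (possibly recycled) skb, the driver attaches
 * its page fragments, and napi_gro_frags() pulls the Ethernet header and
 * feeds the result to GRO. The "foo_" names are hypothetical.
 *
 *	static void foo_rx_page(struct napi_struct *napi, struct page *page,
 *				unsigned int offset, unsigned int len)
 *	{
 *		struct sk_buff *skb = napi_get_frags(napi);
 *
 *		if (!skb)
 *			return;
 *		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *				offset, len, PAGE_SIZE);
 *		napi_gro_frags(napi);	// consumes or recycles napi->skb
 *	}
 */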
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
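
/* Illustrative sketch (editor's addition, not from this file): the usual
 * producer side of NAPI. A device interrupt handler masks further RX
 * interrupts and schedules the poll routine; napi_schedule() combines
 * napi_schedule_prep() and __napi_schedule() as shown here explicitly.
 * The "foo_" names are hypothetical.
 *
 *	static irqreturn_t foo_rx_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */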
/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
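
/* Illustrative sketch (editor's addition, not from this file): the consumer
 * side matching the interrupt handler above. When a poll round does not use
 * its whole budget the driver calls napi_complete_done(); device interrupts
 * are re-enabled only if it returns true, i.e. the instance was not kept
 * scheduled by a missed event or by busy polling. "foo_" names are
 * hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = foo_clean_rx_ring(napi, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			foo_enable_rx_irq(napi);
 *		return work;
 *	}
 */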
/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET)
		__napi_schedule(napi);
	local_bh_enable();
}
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_list && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
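
/* Illustrative sketch (editor's addition, not from this file): NAPI
 * registration and teardown from a driver's setup and removal paths. The
 * weight is normally NAPI_POLL_WEIGHT (64); larger values trigger the
 * warning above. "foo_" and "priv" are hypothetical.
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);	// when the device is opened
 *	...
 *	napi_disable(&priv->napi);	// on close, waits for ->poll to finish
 *	netif_napi_del(&priv->napi);	// on removal, process context only
 */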
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}
static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
5870 * netdev_master_upper_dev_get - Get master upper device
5873 * Find a master upper device and return pointer to it or NULL in case
5874 * it's not there. The caller must hold the RTNL lock.
5876 struct net_device
*netdev_master_upper_dev_get(struct net_device
*dev
)
5878 struct netdev_adjacent
*upper
;
5882 if (list_empty(&dev
->adj_list
.upper
))
5885 upper
= list_first_entry(&dev
->adj_list
.upper
,
5886 struct netdev_adjacent
, list
);
5887 if (likely(upper
->master
))
5891 EXPORT_SYMBOL(netdev_master_upper_dev_get
);
5894 * netdev_has_any_lower_dev - Check if device is linked to some device
5897 * Find out if a device is linked to a lower device and return true in case
5898 * it is. The caller must hold the RTNL lock.
5900 static bool netdev_has_any_lower_dev(struct net_device
*dev
)
5904 return !list_empty(&dev
->adj_list
.lower
);
5907 void *netdev_adjacent_get_private(struct list_head
*adj_list
)
5909 struct netdev_adjacent
*adj
;
5911 adj
= list_entry(adj_list
, struct netdev_adjacent
, list
);
5913 return adj
->private;
5915 EXPORT_SYMBOL(netdev_adjacent_get_private
);
5918 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5920 * @iter: list_head ** of the current position
5922 * Gets the next device from the dev's upper list, starting from iter
5923 * position. The caller must hold RCU read lock.
5925 struct net_device
*netdev_upper_get_next_dev_rcu(struct net_device
*dev
,
5926 struct list_head
**iter
)
5928 struct netdev_adjacent
*upper
;
5930 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5932 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
5934 if (&upper
->list
== &dev
->adj_list
.upper
)
5937 *iter
= &upper
->list
;
5941 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu
);
5943 static struct net_device
*netdev_next_upper_dev_rcu(struct net_device
*dev
,
5944 struct list_head
**iter
)
5946 struct netdev_adjacent
*upper
;
5948 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5950 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
5952 if (&upper
->list
== &dev
->adj_list
.upper
)
5955 *iter
= &upper
->list
;
5960 int netdev_walk_all_upper_dev_rcu(struct net_device
*dev
,
5961 int (*fn
)(struct net_device
*dev
,
5965 struct net_device
*udev
;
5966 struct list_head
*iter
;
5969 for (iter
= &dev
->adj_list
.upper
,
5970 udev
= netdev_next_upper_dev_rcu(dev
, &iter
);
5972 udev
= netdev_next_upper_dev_rcu(dev
, &iter
)) {
5973 /* first is the upper device itself */
5974 ret
= fn(udev
, data
);
5978 /* then look at all of its upper devices */
5979 ret
= netdev_walk_all_upper_dev_rcu(udev
, fn
, data
);
5986 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu
);
5989 * netdev_lower_get_next_private - Get the next ->private from the
5990 * lower neighbour list
5992 * @iter: list_head ** of the current position
5994 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5995 * list, starting from iter position. The caller must hold either hold the
5996 * RTNL lock or its own locking that guarantees that the neighbour lower
5997 * list will remain unchanged.
5999 void *netdev_lower_get_next_private(struct net_device
*dev
,
6000 struct list_head
**iter
)
6002 struct netdev_adjacent
*lower
;
6004 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
6006 if (&lower
->list
== &dev
->adj_list
.lower
)
6009 *iter
= lower
->list
.next
;
6011 return lower
->private;
6013 EXPORT_SYMBOL(netdev_lower_get_next_private
);
6016 * netdev_lower_get_next_private_rcu - Get the next ->private from the
6017 * lower neighbour list, RCU
6020 * @iter: list_head ** of the current position
6022 * Gets the next netdev_adjacent->private from the dev's lower neighbour
6023 * list, starting from iter position. The caller must hold RCU read lock.
6025 void *netdev_lower_get_next_private_rcu(struct net_device
*dev
,
6026 struct list_head
**iter
)
6028 struct netdev_adjacent
*lower
;
6030 WARN_ON_ONCE(!rcu_read_lock_held());
6032 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
6034 if (&lower
->list
== &dev
->adj_list
.lower
)
6037 *iter
= &lower
->list
;
6039 return lower
->private;
6041 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu
);
6044 * netdev_lower_get_next - Get the next device from the lower neighbour
6047 * @iter: list_head ** of the current position
6049 * Gets the next netdev_adjacent from the dev's lower neighbour
6050 * list, starting from iter position. The caller must hold RTNL lock or
6051 * its own locking that guarantees that the neighbour lower
6052 * list will remain unchanged.
6054 void *netdev_lower_get_next(struct net_device
*dev
, struct list_head
**iter
)
6056 struct netdev_adjacent
*lower
;
6058 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
6060 if (&lower
->list
== &dev
->adj_list
.lower
)
6063 *iter
= lower
->list
.next
;
6067 EXPORT_SYMBOL(netdev_lower_get_next
);
6069 static struct net_device
*netdev_next_lower_dev(struct net_device
*dev
,
6070 struct list_head
**iter
)
6072 struct netdev_adjacent
*lower
;
6074 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
6076 if (&lower
->list
== &dev
->adj_list
.lower
)
6079 *iter
= &lower
->list
;
6084 int netdev_walk_all_lower_dev(struct net_device
*dev
,
6085 int (*fn
)(struct net_device
*dev
,
6089 struct net_device
*ldev
;
6090 struct list_head
*iter
;
6093 for (iter
= &dev
->adj_list
.lower
,
6094 ldev
= netdev_next_lower_dev(dev
, &iter
);
6096 ldev
= netdev_next_lower_dev(dev
, &iter
)) {
6097 /* first is the lower device itself */
6098 ret
= fn(ldev
, data
);
6102 /* then look at all of its lower devices */
6103 ret
= netdev_walk_all_lower_dev(ldev
, fn
, data
);
6110 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev
);
6112 static struct net_device
*netdev_next_lower_dev_rcu(struct net_device
*dev
,
6113 struct list_head
**iter
)
6115 struct netdev_adjacent
*lower
;
6117 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
6118 if (&lower
->list
== &dev
->adj_list
.lower
)
6121 *iter
= &lower
->list
;
6126 int netdev_walk_all_lower_dev_rcu(struct net_device
*dev
,
6127 int (*fn
)(struct net_device
*dev
,
6131 struct net_device
*ldev
;
6132 struct list_head
*iter
;
6135 for (iter
= &dev
->adj_list
.lower
,
6136 ldev
= netdev_next_lower_dev_rcu(dev
, &iter
);
6138 ldev
= netdev_next_lower_dev_rcu(dev
, &iter
)) {
6139 /* first is the lower device itself */
6140 ret
= fn(ldev
, data
);
6144 /* then look at all of its lower devices */
6145 ret
= netdev_walk_all_lower_dev_rcu(ldev
, fn
, data
);
6152 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu
);
6155 * netdev_lower_get_first_private_rcu - Get the first ->private from the
6156 * lower neighbour list, RCU
6160 * Gets the first netdev_adjacent->private from the dev's lower neighbour
6161 * list. The caller must hold RCU read lock.
6163 void *netdev_lower_get_first_private_rcu(struct net_device
*dev
)
6165 struct netdev_adjacent
*lower
;
6167 lower
= list_first_or_null_rcu(&dev
->adj_list
.lower
,
6168 struct netdev_adjacent
, list
);
6170 return lower
->private;
6173 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu
);
6176 * netdev_master_upper_dev_get_rcu - Get master upper device
6179 * Find a master upper device and return pointer to it or NULL in case
6180 * it's not there. The caller must hold the RCU read lock.
6182 struct net_device
*netdev_master_upper_dev_get_rcu(struct net_device
*dev
)
6184 struct netdev_adjacent
*upper
;
6186 upper
= list_first_or_null_rcu(&dev
->adj_list
.upper
,
6187 struct netdev_adjacent
, list
);
6188 if (upper
&& likely(upper
->master
))
6192 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu
);
6194 static int netdev_adjacent_sysfs_add(struct net_device
*dev
,
6195 struct net_device
*adj_dev
,
6196 struct list_head
*dev_list
)
6198 char linkname
[IFNAMSIZ
+7];
6200 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
6201 "upper_%s" : "lower_%s", adj_dev
->name
);
6202 return sysfs_create_link(&(dev
->dev
.kobj
), &(adj_dev
->dev
.kobj
),
6205 static void netdev_adjacent_sysfs_del(struct net_device
*dev
,
6207 struct list_head
*dev_list
)
6209 char linkname
[IFNAMSIZ
+7];
6211 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
6212 "upper_%s" : "lower_%s", name
);
6213 sysfs_remove_link(&(dev
->dev
.kobj
), linkname
);
6216 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
6217 struct net_device
*adj_dev
,
6218 struct list_head
*dev_list
)
6220 return (dev_list
== &dev
->adj_list
.upper
||
6221 dev_list
== &dev
->adj_list
.lower
) &&
6222 net_eq(dev_net(dev
), dev_net(adj_dev
));
6225 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
6226 struct net_device
*adj_dev
,
6227 struct list_head
*dev_list
,
6228 void *private, bool master
)
6230 struct netdev_adjacent
*adj
;
6233 adj
= __netdev_find_adj(adj_dev
, dev_list
);
6237 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6238 dev
->name
, adj_dev
->name
, adj
->ref_nr
);
6243 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
6248 adj
->master
= master
;
6250 adj
->private = private;
6253 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6254 dev
->name
, adj_dev
->name
, adj
->ref_nr
, adj_dev
->name
);
6256 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
6257 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
6262 /* Ensure that master link is always the first item in list. */
6264 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
6265 &(adj_dev
->dev
.kobj
), "master");
6267 goto remove_symlinks
;
6269 list_add_rcu(&adj
->list
, dev_list
);
6271 list_add_tail_rcu(&adj
->list
, dev_list
);
6277 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
6278 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
6286 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
6287 struct net_device
*adj_dev
,
6289 struct list_head
*dev_list
)
6291 struct netdev_adjacent
*adj
;
6293 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6294 dev
->name
, adj_dev
->name
, ref_nr
);
6296 adj
= __netdev_find_adj(adj_dev
, dev_list
);
6299 pr_err("Adjacency does not exist for device %s from %s\n",
6300 dev
->name
, adj_dev
->name
);
6305 if (adj
->ref_nr
> ref_nr
) {
6306 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6307 dev
->name
, adj_dev
->name
, ref_nr
,
6308 adj
->ref_nr
- ref_nr
);
6309 adj
->ref_nr
-= ref_nr
;
6314 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
6316 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
6317 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
6319 list_del_rcu(&adj
->list
);
6320 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6321 adj_dev
->name
, dev
->name
, adj_dev
->name
);
6323 kfree_rcu(adj
, rcu
);
6326 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
6327 struct net_device
*upper_dev
,
6328 struct list_head
*up_list
,
6329 struct list_head
*down_list
,
6330 void *private, bool master
)
6334 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
,
6339 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
,
6342 __netdev_adjacent_dev_remove(dev
, upper_dev
, 1, up_list
);
6349 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
6350 struct net_device
*upper_dev
,
6352 struct list_head
*up_list
,
6353 struct list_head
*down_list
)
6355 __netdev_adjacent_dev_remove(dev
, upper_dev
, ref_nr
, up_list
);
6356 __netdev_adjacent_dev_remove(upper_dev
, dev
, ref_nr
, down_list
);
6359 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
6360 struct net_device
*upper_dev
,
6361 void *private, bool master
)
6363 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
6364 &dev
->adj_list
.upper
,
6365 &upper_dev
->adj_list
.lower
,
6369 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
6370 struct net_device
*upper_dev
)
6372 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
, 1,
6373 &dev
->adj_list
.upper
,
6374 &upper_dev
->adj_list
.lower
);
6377 static int __netdev_upper_dev_link(struct net_device
*dev
,
6378 struct net_device
*upper_dev
, bool master
,
6379 void *upper_priv
, void *upper_info
,
6380 struct netlink_ext_ack
*extack
)
6382 struct netdev_notifier_changeupper_info changeupper_info
= {
6387 .upper_dev
= upper_dev
,
6390 .upper_info
= upper_info
,
6396 if (dev
== upper_dev
)
6399 /* To prevent loops, check if dev is not upper device to upper_dev. */
6400 if (netdev_has_upper_dev(upper_dev
, dev
))
6403 if (netdev_has_upper_dev(dev
, upper_dev
))
6406 if (master
&& netdev_master_upper_dev_get(dev
))
6409 ret
= call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
6410 &changeupper_info
.info
);
6411 ret
= notifier_to_errno(ret
);
6415 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, upper_priv
,
6420 ret
= call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
6421 &changeupper_info
.info
);
6422 ret
= notifier_to_errno(ret
);
6429 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
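
/* Illustrative sketch (editor's addition, not from this file): how a
 * bonding/team-style master typically uses these helpers when enslaving and
 * releasing a lower device, under RTNL. The NULL private/info arguments and
 * the error label are placeholders.
 *
 *	err = netdev_master_upper_dev_link(slave_dev, master_dev,
 *					   NULL, NULL, extack);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, master_dev);
 */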
6479 * netdev_upper_dev_unlink - Removes a link to upper device
6481 * @upper_dev: new upper device
6483 * Removes a link to device which is upper to this one. The caller must hold
6486 void netdev_upper_dev_unlink(struct net_device
*dev
,
6487 struct net_device
*upper_dev
)
6489 struct netdev_notifier_changeupper_info changeupper_info
= {
6493 .upper_dev
= upper_dev
,
6499 changeupper_info
.master
= netdev_master_upper_dev_get(dev
) == upper_dev
;
6501 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
6502 &changeupper_info
.info
);
6504 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
6506 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
6507 &changeupper_info
.info
);
6509 EXPORT_SYMBOL(netdev_upper_dev_unlink
);
6512 * netdev_bonding_info_change - Dispatch event about slave change
6514 * @bonding_info: info to dispatch
6516 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6517 * The caller must hold the RTNL lock.
6519 void netdev_bonding_info_change(struct net_device
*dev
,
6520 struct netdev_bonding_info
*bonding_info
)
6522 struct netdev_notifier_bonding_info info
= {
6526 memcpy(&info
.bonding_info
, bonding_info
,
6527 sizeof(struct netdev_bonding_info
));
6528 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
,
6531 EXPORT_SYMBOL(netdev_bonding_info_change
);
6533 static void netdev_adjacent_add_links(struct net_device
*dev
)
6535 struct netdev_adjacent
*iter
;
6537 struct net
*net
= dev_net(dev
);
6539 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
6540 if (!net_eq(net
, dev_net(iter
->dev
)))
6542 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6543 &iter
->dev
->adj_list
.lower
);
6544 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
6545 &dev
->adj_list
.upper
);
6548 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
6549 if (!net_eq(net
, dev_net(iter
->dev
)))
6551 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6552 &iter
->dev
->adj_list
.upper
);
6553 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
6554 &dev
->adj_list
.lower
);
6558 static void netdev_adjacent_del_links(struct net_device
*dev
)
6560 struct netdev_adjacent
*iter
;
6562 struct net
*net
= dev_net(dev
);
6564 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
6565 if (!net_eq(net
, dev_net(iter
->dev
)))
6567 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
6568 &iter
->dev
->adj_list
.lower
);
6569 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
6570 &dev
->adj_list
.upper
);
6573 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
6574 if (!net_eq(net
, dev_net(iter
->dev
)))
6576 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
6577 &iter
->dev
->adj_list
.upper
);
6578 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
6579 &dev
->adj_list
.lower
);
6583 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
6585 struct netdev_adjacent
*iter
;
6587 struct net
*net
= dev_net(dev
);
6589 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
6590 if (!net_eq(net
, dev_net(iter
->dev
)))
6592 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
6593 &iter
->dev
->adj_list
.lower
);
6594 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6595 &iter
->dev
->adj_list
.lower
);
6598 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
6599 if (!net_eq(net
, dev_net(iter
->dev
)))
6601 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
6602 &iter
->dev
->adj_list
.upper
);
6603 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
6604 &iter
->dev
->adj_list
.upper
);
6608 void *netdev_lower_dev_get_private(struct net_device
*dev
,
6609 struct net_device
*lower_dev
)
6611 struct netdev_adjacent
*lower
;
6615 lower
= __netdev_find_adj(lower_dev
, &dev
->adj_list
.lower
);
6619 return lower
->private;
6621 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
6624 int dev_get_nest_level(struct net_device
*dev
)
6626 struct net_device
*lower
= NULL
;
6627 struct list_head
*iter
;
6633 netdev_for_each_lower_dev(dev
, lower
, iter
) {
6634 nest
= dev_get_nest_level(lower
);
6635 if (max_nest
< nest
)
6639 return max_nest
+ 1;
6641 EXPORT_SYMBOL(dev_get_nest_level
);
6644 * netdev_lower_change - Dispatch event about lower device state change
6645 * @lower_dev: device
6646 * @lower_state_info: state to dispatch
6648 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6649 * The caller must hold the RTNL lock.
6651 void netdev_lower_state_changed(struct net_device
*lower_dev
,
6652 void *lower_state_info
)
6654 struct netdev_notifier_changelowerstate_info changelowerstate_info
= {
6655 .info
.dev
= lower_dev
,
6659 changelowerstate_info
.lower_state_info
= lower_state_info
;
6660 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE
,
6661 &changelowerstate_info
.info
);
6663 EXPORT_SYMBOL(netdev_lower_state_changed
);
6665 static void dev_change_rx_flags(struct net_device
*dev
, int flags
)
6667 const struct net_device_ops
*ops
= dev
->netdev_ops
;
6669 if (ops
->ndo_change_rx_flags
)
6670 ops
->ndo_change_rx_flags(dev
, flags
);
6673 static int __dev_set_promiscuity(struct net_device
*dev
, int inc
, bool notify
)
6675 unsigned int old_flags
= dev
->flags
;
6681 dev
->flags
|= IFF_PROMISC
;
6682 dev
->promiscuity
+= inc
;
6683 if (dev
->promiscuity
== 0) {
6686 * If inc causes overflow, untouch promisc and return error.
6689 dev
->flags
&= ~IFF_PROMISC
;
6691 dev
->promiscuity
-= inc
;
6692 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6697 if (dev
->flags
!= old_flags
) {
6698 pr_info("device %s %s promiscuous mode\n",
6700 dev
->flags
& IFF_PROMISC
? "entered" : "left");
6701 if (audit_enabled
) {
6702 current_uid_gid(&uid
, &gid
);
6703 audit_log(current
->audit_context
, GFP_ATOMIC
,
6704 AUDIT_ANOM_PROMISCUOUS
,
6705 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6706 dev
->name
, (dev
->flags
& IFF_PROMISC
),
6707 (old_flags
& IFF_PROMISC
),
6708 from_kuid(&init_user_ns
, audit_get_loginuid(current
)),
6709 from_kuid(&init_user_ns
, uid
),
6710 from_kgid(&init_user_ns
, gid
),
6711 audit_get_sessionid(current
));
6714 dev_change_rx_flags(dev
, IFF_PROMISC
);
6717 __dev_notify_flags(dev
, old_flags
, IFF_PROMISC
);
/**
 *	dev_set_promiscuity - update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
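
/* Illustrative sketch (editor's addition, not from this file): promiscuity
 * is a counter, so users bracket their need for it symmetrically, e.g. a
 * bridge-like upper device while a port is attached (RTNL held):
 *
 *	err = dev_set_promiscuity(port_dev, 1);	// attach: count++
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(port_dev, -1);	// detach: count--
 */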
6746 static int __dev_set_allmulti(struct net_device
*dev
, int inc
, bool notify
)
6748 unsigned int old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
6752 dev
->flags
|= IFF_ALLMULTI
;
6753 dev
->allmulti
+= inc
;
6754 if (dev
->allmulti
== 0) {
6757 * If inc causes overflow, untouch allmulti and return error.
6760 dev
->flags
&= ~IFF_ALLMULTI
;
6762 dev
->allmulti
-= inc
;
6763 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6768 if (dev
->flags
^ old_flags
) {
6769 dev_change_rx_flags(dev
, IFF_ALLMULTI
);
6770 dev_set_rx_mode(dev
);
6772 __dev_notify_flags(dev
, old_flags
,
6773 dev
->gflags
^ old_gflags
);
/**
 *	dev_set_allmulti - update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
6798 * Upload unicast and multicast address lists to device and
6799 * configure RX filtering. When the device doesn't support unicast
6800 * filtering it is put in promiscuous mode while unicast addresses
6803 void __dev_set_rx_mode(struct net_device
*dev
)
6805 const struct net_device_ops
*ops
= dev
->netdev_ops
;
6807 /* dev_open will call this function so the list will stay sane. */
6808 if (!(dev
->flags
&IFF_UP
))
6811 if (!netif_device_present(dev
))
6814 if (!(dev
->priv_flags
& IFF_UNICAST_FLT
)) {
6815 /* Unicast addresses changes may only happen under the rtnl,
6816 * therefore calling __dev_set_promiscuity here is safe.
6818 if (!netdev_uc_empty(dev
) && !dev
->uc_promisc
) {
6819 __dev_set_promiscuity(dev
, 1, false);
6820 dev
->uc_promisc
= true;
6821 } else if (netdev_uc_empty(dev
) && dev
->uc_promisc
) {
6822 __dev_set_promiscuity(dev
, -1, false);
6823 dev
->uc_promisc
= false;
6827 if (ops
->ndo_set_rx_mode
)
6828 ops
->ndo_set_rx_mode(dev
);
6831 void dev_set_rx_mode(struct net_device
*dev
)
6833 netif_addr_lock_bh(dev
);
6834 __dev_set_rx_mode(dev
);
6835 netif_addr_unlock_bh(dev
);
6839 * dev_get_flags - get flags reported to userspace
6842 * Get the combination of flag bits exported through APIs to userspace.
6844 unsigned int dev_get_flags(const struct net_device
*dev
)
6848 flags
= (dev
->flags
& ~(IFF_PROMISC
|
6853 (dev
->gflags
& (IFF_PROMISC
|
6856 if (netif_running(dev
)) {
6857 if (netif_oper_up(dev
))
6858 flags
|= IFF_RUNNING
;
6859 if (netif_carrier_ok(dev
))
6860 flags
|= IFF_LOWER_UP
;
6861 if (netif_dormant(dev
))
6862 flags
|= IFF_DORMANT
;
6867 EXPORT_SYMBOL(dev_get_flags
);
6869 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
)
6871 unsigned int old_flags
= dev
->flags
;
6877 * Set the flags on our device.
6880 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
6881 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
6883 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
6887 * Load in the correct multicast list now the flags have changed.
6890 if ((old_flags
^ flags
) & IFF_MULTICAST
)
6891 dev_change_rx_flags(dev
, IFF_MULTICAST
);
6893 dev_set_rx_mode(dev
);
6896 * Have we downed the interface. We handle IFF_UP ourselves
6897 * according to user attempts to set it, rather than blindly
6902 if ((old_flags
^ flags
) & IFF_UP
) {
6903 if (old_flags
& IFF_UP
)
6906 ret
= __dev_open(dev
);
6909 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
6910 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
6911 unsigned int old_flags
= dev
->flags
;
6913 dev
->gflags
^= IFF_PROMISC
;
6915 if (__dev_set_promiscuity(dev
, inc
, false) >= 0)
6916 if (dev
->flags
!= old_flags
)
6917 dev_set_rx_mode(dev
);
6920 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6921 * is important. Some (broken) drivers set IFF_PROMISC, when
6922 * IFF_ALLMULTI is requested not asking us and not reporting.
6924 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
6925 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
6927 dev
->gflags
^= IFF_ALLMULTI
;
6928 __dev_set_allmulti(dev
, inc
, false);
6934 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
,
6935 unsigned int gchanges
)
6937 unsigned int changes
= dev
->flags
^ old_flags
;
6940 rtmsg_ifinfo(RTM_NEWLINK
, dev
, gchanges
, GFP_ATOMIC
);
6942 if (changes
& IFF_UP
) {
6943 if (dev
->flags
& IFF_UP
)
6944 call_netdevice_notifiers(NETDEV_UP
, dev
);
6946 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
6949 if (dev
->flags
& IFF_UP
&&
6950 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
))) {
6951 struct netdev_notifier_change_info change_info
= {
6955 .flags_changed
= changes
,
6958 call_netdevice_notifiers_info(NETDEV_CHANGE
, &change_info
.info
);
6963 * dev_change_flags - change device settings
6965 * @flags: device state flags
6967 * Change settings on device based state flags. The flags are
6968 * in the userspace exported format.
6970 int dev_change_flags(struct net_device
*dev
, unsigned int flags
)
6973 unsigned int changes
, old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
6975 ret
= __dev_change_flags(dev
, flags
);
6979 changes
= (old_flags
^ dev
->flags
) | (old_gflags
^ dev
->gflags
);
6980 __dev_notify_flags(dev
, old_flags
, changes
);
6983 EXPORT_SYMBOL(dev_change_flags
);
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

/**
 *	dev_set_mtu - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *
 *	Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
				    dev->name, new_mtu, dev->min_mtu);
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
				    dev->name, new_mtu, dev->max_mtu);
		return -EINVAL;
	}

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
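
/* Illustrative sketch (editor's addition, not from this file): callers such
 * as rtnetlink or an upper device adjust the MTU under RTNL and must be
 * prepared for the range check or a notifier chain to refuse the change:
 *
 *	ASSERT_RTNL();
 *	err = dev_set_mtu(dev, new_mtu);
 *	if (err)
 *		netdev_err(dev, "failed to set MTU %d: %d\n", new_mtu, err);
 */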
7051 * dev_change_tx_queue_len - Change TX queue length of a netdevice
7053 * @new_len: new tx queue length
7055 int dev_change_tx_queue_len(struct net_device
*dev
, unsigned long new_len
)
7057 unsigned int orig_len
= dev
->tx_queue_len
;
7060 if (new_len
!= (unsigned int)new_len
)
7063 if (new_len
!= orig_len
) {
7064 dev
->tx_queue_len
= new_len
;
7065 res
= call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN
, dev
);
7066 res
= notifier_to_errno(res
);
7069 "refused to change device tx_queue_len\n");
7070 dev
->tx_queue_len
= orig_len
;
7073 return dev_qdisc_change_tx_queue_len(dev
);
7080 * dev_set_group - Change group this device belongs to
7082 * @new_group: group this device should belong to
7084 void dev_set_group(struct net_device
*dev
, int new_group
)
7086 dev
->group
= new_group
;
7088 EXPORT_SYMBOL(dev_set_group
);
7091 * dev_set_mac_address - Change Media Access Control Address
7095 * Change the hardware (MAC) address of the device
7097 int dev_set_mac_address(struct net_device
*dev
, struct sockaddr
*sa
)
7099 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7102 if (!ops
->ndo_set_mac_address
)
7104 if (sa
->sa_family
!= dev
->type
)
7106 if (!netif_device_present(dev
))
7108 err
= ops
->ndo_set_mac_address(dev
, sa
);
7111 dev
->addr_assign_type
= NET_ADDR_SET
;
7112 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
7113 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
7116 EXPORT_SYMBOL(dev_set_mac_address
);
7119 * dev_change_carrier - Change device carrier
7121 * @new_carrier: new value
7123 * Change device carrier
7125 int dev_change_carrier(struct net_device
*dev
, bool new_carrier
)
7127 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7129 if (!ops
->ndo_change_carrier
)
7131 if (!netif_device_present(dev
))
7133 return ops
->ndo_change_carrier(dev
, new_carrier
);
7135 EXPORT_SYMBOL(dev_change_carrier
);
7138 * dev_get_phys_port_id - Get device physical port ID
7142 * Get device physical port ID
7144 int dev_get_phys_port_id(struct net_device
*dev
,
7145 struct netdev_phys_item_id
*ppid
)
7147 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7149 if (!ops
->ndo_get_phys_port_id
)
7151 return ops
->ndo_get_phys_port_id(dev
, ppid
);
7153 EXPORT_SYMBOL(dev_get_phys_port_id
);
7156 * dev_get_phys_port_name - Get device physical port name
7159 * @len: limit of bytes to copy to name
7161 * Get device physical port name
7163 int dev_get_phys_port_name(struct net_device
*dev
,
7164 char *name
, size_t len
)
7166 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7168 if (!ops
->ndo_get_phys_port_name
)
7170 return ops
->ndo_get_phys_port_name(dev
, name
, len
);
7172 EXPORT_SYMBOL(dev_get_phys_port_name
);
7175 * dev_change_proto_down - update protocol port state information
7177 * @proto_down: new value
7179 * This info can be used by switch drivers to set the phys state of the
7182 int dev_change_proto_down(struct net_device
*dev
, bool proto_down
)
7184 const struct net_device_ops
*ops
= dev
->netdev_ops
;
7186 if (!ops
->ndo_change_proto_down
)
7188 if (!netif_device_present(dev
))
7190 return ops
->ndo_change_proto_down(dev
, proto_down
);
7192 EXPORT_SYMBOL(dev_change_proto_down
);
7194 void __dev_xdp_query(struct net_device
*dev
, bpf_op_t bpf_op
,
7195 struct netdev_bpf
*xdp
)
7197 memset(xdp
, 0, sizeof(*xdp
));
7198 xdp
->command
= XDP_QUERY_PROG
;
7200 /* Query must always succeed. */
7201 WARN_ON(bpf_op(dev
, xdp
) < 0);
7204 static u8
__dev_xdp_attached(struct net_device
*dev
, bpf_op_t bpf_op
)
7206 struct netdev_bpf xdp
;
7208 __dev_xdp_query(dev
, bpf_op
, &xdp
);
7210 return xdp
.prog_attached
;
static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
                           struct netlink_ext_ack *extack, u32 flags,
                           struct bpf_prog *prog)
{
        struct netdev_bpf xdp;

        memset(&xdp, 0, sizeof(xdp));
        if (flags & XDP_FLAGS_HW_MODE)
                xdp.command = XDP_SETUP_PROG_HW;
        else
                xdp.command = XDP_SETUP_PROG;
        xdp.extack = extack;
        xdp.flags = flags;
        xdp.prog = prog;

        return bpf_op(dev, &xdp);
}

static void dev_xdp_uninstall(struct net_device *dev)
{
        struct netdev_bpf xdp;
        bpf_op_t ndo_bpf;

        /* Remove generic XDP */
        WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));

        /* Remove from the driver */
        ndo_bpf = dev->netdev_ops->ndo_bpf;
        if (!ndo_bpf)
                return;

        __dev_xdp_query(dev, ndo_bpf, &xdp);
        if (xdp.prog_attached == XDP_ATTACHED_NONE)
                return;

        /* Program removal should always succeed */
        WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
}

/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                      int fd, u32 flags)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        struct bpf_prog *prog = NULL;
        bpf_op_t bpf_op, bpf_chk;
        int err;

        ASSERT_RTNL();

        bpf_op = bpf_chk = ops->ndo_bpf;
        if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
                return -EOPNOTSUPP;
        if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
                bpf_op = generic_xdp_install;
        if (bpf_op == bpf_chk)
                bpf_chk = generic_xdp_install;

        if (fd >= 0) {
                if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
                        return -EEXIST;
                if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
                    __dev_xdp_attached(dev, bpf_op))
                        return -EBUSY;

                prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
                                             bpf_op == ops->ndo_bpf);
                if (IS_ERR(prog))
                        return PTR_ERR(prog);

                if (!(flags & XDP_FLAGS_HW_MODE) &&
                    bpf_prog_is_dev_bound(prog->aux)) {
                        NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
                        bpf_prog_put(prog);
                        return -EINVAL;
                }
        }

        err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
        if (err < 0 && prog)
                bpf_prog_put(prog);

        return err;
}

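/* Example (illustrative sketch added in this edit, not original code):
 * dev_change_xdp_fd() is normally reached from rtnetlink's do_setlink() when
 * userspace sets IFLA_XDP_FD. A caller holding the RTNL lock could attach a
 * program in generic (skb) mode like this; "prog_fd" and "extack" come from
 * the netlink request and are assumptions here, not symbols in this file.
 *
 *      err = dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_SKB_MODE);
 *
 * Passing a negative fd with the same flags detaches the program again.
 */
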
/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number.  The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
        int ifindex = net->ifindex;

        for (;;) {
                if (++ifindex <= 0)
                        ifindex = 1;
                if (!__dev_get_by_index(net, ifindex))
                        return net->ifindex = ifindex;
        }
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
        list_add_tail(&dev->todo_list, &net_todo_list);
        dev_net(dev)->dev_unreg_count++;
}

static void rollback_registered_many(struct list_head *head)
{
        struct net_device *dev, *tmp;
        LIST_HEAD(close_head);

        BUG_ON(dev_boot_phase);
        ASSERT_RTNL();

        list_for_each_entry_safe(dev, tmp, head, unreg_list) {
                /* Some devices call without registering
                 * for initialization unwind. Remove those
                 * devices and proceed with the remaining.
                 */
                if (dev->reg_state == NETREG_UNINITIALIZED) {
                        pr_debug("unregister_netdevice: device %s/%p never was registered\n",
                                 dev->name, dev);

                        WARN_ON(1);
                        list_del(&dev->unreg_list);
                        continue;
                }
                dev->dismantle = true;
                BUG_ON(dev->reg_state != NETREG_REGISTERED);
        }

        /* If device is running, close it first. */
        list_for_each_entry(dev, head, unreg_list)
                list_add_tail(&dev->close_list, &close_head);
        dev_close_many(&close_head, true);

        list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
                unlist_netdevice(dev);

                dev->reg_state = NETREG_UNREGISTERING;
        }
        flush_all_backlogs();

        synchronize_net();

        list_for_each_entry(dev, head, unreg_list) {
                struct sk_buff *skb = NULL;

                /* Shutdown queueing discipline. */
                dev_shutdown(dev);

                dev_xdp_uninstall(dev);

                /* Notify protocols, that we are about to destroy
                 * this device. They should clean all the things.
                 */
                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

                if (!dev->rtnl_link_ops ||
                    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
                        skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
                                                     GFP_KERNEL, NULL, 0);

                /*
                 * Flush the unicast and multicast chains
                 */
                dev_uc_flush(dev);
                dev_mc_flush(dev);

                if (dev->netdev_ops->ndo_uninit)
                        dev->netdev_ops->ndo_uninit(dev);

                if (skb)
                        rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

                /* Notifier chain MUST detach us all upper devices. */
                WARN_ON(netdev_has_any_upper_dev(dev));
                WARN_ON(netdev_has_any_lower_dev(dev));

                /* Remove entries from kobject tree */
                netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
                /* Remove XPS queueing entries */
                netif_reset_xps_queues_gt(dev, 0);
#endif
        }

        synchronize_net();

        list_for_each_entry(dev, head, unreg_list)
                dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->unreg_list, &single);
        rollback_registered_many(&single);
        list_del(&single);
}

static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
        struct net_device *upper, netdev_features_t features)
{
        netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
        netdev_features_t feature;
        int feature_bit;

        for_each_netdev_feature(&upper_disables, feature_bit) {
                feature = __NETIF_F_BIT(feature_bit);
                if (!(upper->wanted_features & feature)
                    && (features & feature)) {
                        netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
                                   &feature, upper->name);
                        features &= ~feature;
                }
        }

        return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
        struct net_device *lower, netdev_features_t features)
{
        netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
        netdev_features_t feature;
        int feature_bit;

        for_each_netdev_feature(&upper_disables, feature_bit) {
                feature = __NETIF_F_BIT(feature_bit);
                if (!(features & feature) && (lower->features & feature)) {
                        netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
                                   &feature, lower->name);
                        lower->wanted_features &= ~feature;
                        netdev_update_features(lower);

                        if (unlikely(lower->features & feature))
                                netdev_WARN(upper, "failed to disable %pNF on %s!\n",
                                            &feature, lower->name);
                }
        }
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        /* Fix illegal checksum combinations */
        if ((features & NETIF_F_HW_CSUM) &&
            (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
                netdev_warn(dev, "mixed HW and IP checksum settings.\n");
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }

        /* TSO requires that SG is present as well. */
        if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
                netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
                features &= ~NETIF_F_ALL_TSO;
        }

        if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
            !(features & NETIF_F_IP_CSUM)) {
                netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
                features &= ~NETIF_F_TSO;
                features &= ~NETIF_F_TSO_ECN;
        }

        if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
            !(features & NETIF_F_IPV6_CSUM)) {
                netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
                features &= ~NETIF_F_TSO6;
        }

        /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
        if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
                features &= ~NETIF_F_TSO_MANGLEID;

        /* TSO ECN requires that TSO is present as well. */
        if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
                features &= ~NETIF_F_TSO_ECN;

        /* Software GSO depends on SG. */
        if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
                netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
                features &= ~NETIF_F_GSO;
        }

        /* GSO partial features require GSO partial be set */
        if ((features & dev->gso_partial_features) &&
            !(features & NETIF_F_GSO_PARTIAL)) {
                netdev_dbg(dev,
                           "Dropping partially supported GSO features since no GSO partial.\n");
                features &= ~dev->gso_partial_features;
        }

        if (!(features & NETIF_F_RXCSUM)) {
                /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
                 * successfully merged by hardware must also have the
                 * checksum verified by hardware.  If the user does not
                 * want to enable RXCSUM, logically, we should disable GRO_HW.
                 */
                if (features & NETIF_F_GRO_HW) {
                        netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
                        features &= ~NETIF_F_GRO_HW;
                }
        }

        return features;
}

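/* Example (illustrative sketch added in this edit, not original code): a
 * driver can apply additional, hardware-specific constraints before the
 * generic fixups above by implementing ndo_fix_features(). The dependency
 * shown below is hypothetical and only demonstrates the expected shape of
 * such a callback.
 *
 *      static netdev_features_t foo_fix_features(struct net_device *dev,
 *                                                netdev_features_t features)
 *      {
 *              if (!(features & NETIF_F_RXCSUM))
 *                      features &= ~NETIF_F_LRO;
 *              return features;
 *      }
 */
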
int __netdev_update_features(struct net_device *dev)
{
        struct net_device *upper, *lower;
        netdev_features_t features;
        struct list_head *iter;
        int err = -1;

        ASSERT_RTNL();

        features = netdev_get_wanted_features(dev);

        if (dev->netdev_ops->ndo_fix_features)
                features = dev->netdev_ops->ndo_fix_features(dev, features);

        /* driver might be less strict about feature dependencies */
        features = netdev_fix_features(dev, features);

        /* some features can't be enabled if they're off on an upper device */
        netdev_for_each_upper_dev_rcu(dev, upper, iter)
                features = netdev_sync_upper_features(dev, upper, features);

        if (dev->features == features)
                goto sync_lower;

        netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
                   &dev->features, &features);

        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
        else
                err = 0;

        if (unlikely(err < 0)) {
                netdev_err(dev,
                           "set_features() failed (%d); wanted %pNF, left %pNF\n",
                           err, &features, &dev->features);
                /* return non-0 since some features might have changed and
                 * it's better to fire a spurious notification than miss it
                 */
                return -1;
        }

sync_lower:
        /* some features must be disabled on lower devices when disabled
         * on an upper device (think: bonding master or bridge)
         */
        netdev_for_each_lower_dev(dev, lower, iter)
                netdev_sync_lower_features(dev, lower, features);

        if (!err) {
                netdev_features_t diff = features ^ dev->features;

                if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
                        /* udp_tunnel_{get,drop}_rx_info both need
                         * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
                         * device, or they won't do anything.
                         * Thus we need to update dev->features
                         * *before* calling udp_tunnel_get_rx_info,
                         * but *after* calling udp_tunnel_drop_rx_info.
                         */
                        if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
                                dev->features = features;
                                udp_tunnel_get_rx_info(dev);
                        } else {
                                udp_tunnel_drop_rx_info(dev);
                        }
                }

                dev->features = features;
        }

        return err < 0 ? 0 : 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
        if (__netdev_update_features(dev))
                netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * devices.
 */
void netdev_change_features(struct net_device *dev)
{
        __netdev_update_features(dev);
        netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

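/* Example (illustrative sketch added in this edit, not original code): after
 * a driver changes what its hardware can offload (say, following a firmware
 * reconfiguration), it re-runs the feature computation under RTNL so that
 * ndo_fix_features()/ndo_set_features() and the feature notifiers fire. The
 * capability change below is hypothetical.
 *
 *      rtnl_lock();
 *      dev->hw_features &= ~NETIF_F_LRO;
 *      netdev_update_features(dev);
 *      rtnl_unlock();
 */
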
/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                      struct net_device *dev)
{
        if (rootdev->operstate == IF_OPER_DORMANT)
                netif_dormant_on(dev);
        else
                netif_dormant_off(dev);

        if (netif_carrier_ok(rootdev))
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

static int netif_alloc_rx_queues(struct net_device *dev)
{
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
        size_t sz = count * sizeof(*rx);
        int err = 0;

        BUG_ON(count < 1);

        rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!rx)
                return -ENOMEM;

        dev->_rx = rx;

        for (i = 0; i < count; i++) {
                rx[i].dev = dev;

                /* XDP RX-queue setup */
                err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
                if (err < 0)
                        goto err_rxq_info;
        }
        return 0;

err_rxq_info:
        /* Rollback successful reg's and free other resources */
        while (i--)
                xdp_rxq_info_unreg(&rx[i].xdp_rxq);
        kvfree(dev->_rx);
        dev->_rx = NULL;
        return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
        unsigned int i, count = dev->num_rx_queues;

        /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
        if (!dev->_rx)
                return;

        for (i = 0; i < count; i++)
                xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

        kvfree(dev->_rx);
}

static void netdev_init_one_queue(struct net_device *dev,
                                  struct netdev_queue *queue, void *_unused)
{
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
        netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
#ifdef CONFIG_BQL
        dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
        kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
        unsigned int count = dev->num_tx_queues;
        struct netdev_queue *tx;
        size_t sz = count * sizeof(*tx);

        if (count < 1 || count > 0xffff)
                return -EINVAL;

        tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!tx)
                return -ENOMEM;

        dev->_tx = tx;

        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
        spin_lock_init(&dev->tx_global_lock);

        return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                netif_tx_stop_queue(txq);
        }
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);

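/* Example (illustrative sketch added in this edit, not original code): a
 * multiqueue driver typically brackets a hardware reset with queue stop and
 * wake calls so the stack stops submitting skbs while the rings are torn
 * down. "foo_hw_reset" and "priv" are hypothetical driver symbols.
 *
 *      netif_tx_stop_all_queues(dev);
 *      foo_hw_reset(priv);
 *      netif_tx_wake_all_queues(dev);
 */
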
/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
        int ret;
        struct net *net = dev_net(dev);

        BUG_ON(dev_boot_phase);
        ASSERT_RTNL();

        might_sleep();

        /* When net_device's are persistent, this will be fatal. */
        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
        BUG_ON(!net);

        spin_lock_init(&dev->addr_list_lock);
        netdev_set_addr_lockdep_class(dev);

        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;

        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
                if (ret) {
                        if (ret > 0)
                                ret = -EIO;
                        goto out;
                }
        }

        if (((dev->hw_features | dev->features) &
             NETIF_F_HW_VLAN_CTAG_FILTER) &&
            (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
             !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
                netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
                ret = -EINVAL;
                goto err_uninit;
        }

        ret = -EBUSY;
        if (!dev->ifindex)
                dev->ifindex = dev_new_index(net);
        else if (__dev_get_by_index(net, dev->ifindex))
                goto err_uninit;

        /* Transfer changeable features to wanted_features and enable
         * software offloads (GSO and GRO).
         */
        dev->hw_features |= NETIF_F_SOFT_FEATURES;
        dev->features |= NETIF_F_SOFT_FEATURES;

        if (dev->netdev_ops->ndo_udp_tunnel_add) {
                dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
                dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
        }

        dev->wanted_features = dev->features & dev->hw_features;

        if (!(dev->flags & IFF_LOOPBACK))
                dev->hw_features |= NETIF_F_NOCACHE_COPY;

        /* If IPv4 TCP segmentation offload is supported we should also
         * allow the device to enable segmenting the frame with the option
         * of ignoring a static IP ID value.  This doesn't enable the
         * feature itself but allows the user to enable it later.
         */
        if (dev->hw_features & NETIF_F_TSO)
                dev->hw_features |= NETIF_F_TSO_MANGLEID;
        if (dev->vlan_features & NETIF_F_TSO)
                dev->vlan_features |= NETIF_F_TSO_MANGLEID;
        if (dev->mpls_features & NETIF_F_TSO)
                dev->mpls_features |= NETIF_F_TSO_MANGLEID;
        if (dev->hw_enc_features & NETIF_F_TSO)
                dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
         */
        dev->vlan_features |= NETIF_F_HIGHDMA;

        /* Make NETIF_F_SG inheritable to tunnel devices.
         */
        dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

        /* Make NETIF_F_SG inheritable to MPLS.
         */
        dev->mpls_features |= NETIF_F_SG;

        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                goto err_uninit;

        ret = netdev_register_kobject(dev);
        if (ret)
                goto err_uninit;
        dev->reg_state = NETREG_REGISTERED;

        __netdev_update_features(dev);

        /*
         * Default initial state at registry is that the
         * device is present.
         */
        set_bit(__LINK_STATE_PRESENT, &dev->state);

        linkwatch_init_dev(dev);

        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
        add_device_randomness(dev->dev_addr, dev->addr_len);

        /* If the device has permanent device address, driver should
         * set dev_addr and also addr_assign_type should be set to
         * NET_ADDR_PERM (default value).
         */
        if (dev->addr_assign_type == NET_ADDR_PERM)
                memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        /* Notify protocols, that a new device appeared. */
        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
        ret = notifier_to_errno(ret);
        if (ret) {
                rollback_registered(dev);
                dev->reg_state = NETREG_UNREGISTERED;
        }
        /*
         * Prevent userspace races by waiting until the network
         * device is fully setup before sending notifications.
         */
        if (!dev->rtnl_link_ops ||
            dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
        return ret;

err_uninit:
        if (dev->netdev_ops->ndo_uninit)
                dev->netdev_ops->ndo_uninit(dev);
        if (dev->priv_destructor)
                dev->priv_destructor(dev);
        goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
        /* Clear everything. Note we don't initialize spinlocks
         * as they aren't supposed to be taken by any of the
         * NAPI code and this dummy netdev is supposed to be
         * only ever used for NAPI polls
         */
        memset(dev, 0, sizeof(struct net_device));

        /* make sure we BUG if trying to hit standard
         * register/unregister code path
         */
        dev->reg_state = NETREG_DUMMY;

        /* NAPI wants this */
        INIT_LIST_HEAD(&dev->napi_list);

        /* a dummy interface is started by default */
        set_bit(__LINK_STATE_PRESENT, &dev->state);
        set_bit(__LINK_STATE_START, &dev->state);

        /* Note : We dont allocate pcpu_refcnt for dummy devices,
         * because users of this 'device' dont need to change
         * its refcount.
         */

        return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = register_netdevice(dev);
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(register_netdev);

int netdev_refcnt_read(const struct net_device *dev)
{
        int i, refcnt = 0;

        for_each_possible_cpu(i)
                refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
        return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

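/* Example (illustrative sketch added in this edit, not original code): a
 * typical probe path pairing allocation with registration. "foo_priv",
 * "foo_netdev_ops" and "foo_setup_hw" are hypothetical driver symbols.
 *
 *      dev = alloc_etherdev(sizeof(struct foo_priv));
 *      if (!dev)
 *              return -ENOMEM;
 *      dev->netdev_ops = &foo_netdev_ops;
 *      foo_setup_hw(netdev_priv(dev));
 *      err = register_netdev(dev);
 *      if (err) {
 *              free_netdev(dev);
 *              return err;
 *      }
 */
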
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
        unsigned long rebroadcast_time, warning_time;
        int refcnt;

        linkwatch_forget_dev(dev);

        rebroadcast_time = warning_time = jiffies;
        refcnt = netdev_refcnt_read(dev);

        while (refcnt != 0) {
                if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
                        rtnl_lock();

                        /* Rebroadcast unregister notification */
                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

                        __rtnl_unlock();
                        rcu_barrier();
                        rtnl_lock();

                        call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
                                     &dev->state)) {
                                /* We must not have linkwatch events
                                 * pending on unregister. If this
                                 * happens, we simply run the queue
                                 * unscheduled, resulting in a noop
                                 * for this device.
                                 */
                                linkwatch_run_queue();
                        }

                        __rtnl_unlock();

                        rebroadcast_time = jiffies;
                }

                msleep(250);

                refcnt = netdev_refcnt_read(dev);

                if (time_after(jiffies, warning_time + 10 * HZ)) {
                        pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
                                 dev->name, refcnt);
                        warning_time = jiffies;
                }
        }
}

/* The sequence is:
 *
 *      rtnl_lock();
 *      ...
 *      register_netdevice(x1);
 *      register_netdevice(x2);
 *      ...
 *      unregister_netdevice(y1);
 *      unregister_netdevice(y2);
 *      ...
 *      rtnl_unlock();
 *      free_netdev(y1);
 *      free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
        struct list_head list;

        /* Snapshot list, allow later requests */
        list_replace_init(&net_todo_list, &list);

        __rtnl_unlock();

        /* Wait for rcu callbacks to finish before next phase */
        if (!list_empty(&list))
                rcu_barrier();

        while (!list_empty(&list)) {
                struct net_device *dev
                        = list_first_entry(&list, struct net_device, todo_list);
                list_del(&dev->todo_list);

                rtnl_lock();
                call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
                __rtnl_unlock();

                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
                        pr_err("network todo '%s' but state %d\n",
                               dev->name, dev->reg_state);
                        dump_stack();
                        continue;
                }

                dev->reg_state = NETREG_UNREGISTERED;

                netdev_wait_allrefs(dev);

                /* paranoia */
                BUG_ON(netdev_refcnt_read(dev));
                BUG_ON(!list_empty(&dev->ptype_all));
                BUG_ON(!list_empty(&dev->ptype_specific));
                WARN_ON(rcu_access_pointer(dev->ip_ptr));
                WARN_ON(rcu_access_pointer(dev->ip6_ptr));
                WARN_ON(dev->dn_ptr);

                if (dev->priv_destructor)
                        dev->priv_destructor(dev);
                if (dev->needs_free_netdev)
                        free_netdev(dev);

                /* Report a network device has been unregistered */
                rtnl_lock();
                dev_net(dev)->dev_unreg_count--;
                __rtnl_unlock();
                wake_up(&netdev_unregistering_wq);

                /* Free network device */
                kobject_put(&dev->dev.kobj);
        }
}

/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                             const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
        BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
        memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + sizeof(*netdev_stats), 0,
               sizeof(*stats64) - sizeof(*netdev_stats));
#else
        size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
        const unsigned long *src = (const unsigned long *)netdev_stats;
        u64 *dst = (u64 *)stats64;

        BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
        for (i = 0; i < n; i++)
                dst[i] = src[i];
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + n * sizeof(u64), 0,
               sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        if (ops->ndo_get_stats64) {
                memset(storage, 0, sizeof(*storage));
                ops->ndo_get_stats64(dev, storage);
        } else if (ops->ndo_get_stats) {
                netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
        storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
        storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
        storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
        return storage;
}
EXPORT_SYMBOL(dev_get_stats);

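/* Example (illustrative sketch added in this edit, not original code): an
 * in-kernel reader of interface counters fills a caller-provided structure
 * rather than touching dev->stats directly:
 *
 *      struct rtnl_link_stats64 stats;
 *
 *      dev_get_stats(dev, &stats);
 *      pr_debug("%s: rx %llu tx %llu\n", dev->name,
 *               stats.rx_packets, stats.tx_packets);
 */
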
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
        struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
        if (queue)
                return queue;
        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return NULL;
        netdev_init_one_queue(dev, queue, NULL);
        RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
        queue->qdisc_sleeping = &noop_qdisc;
        rcu_assign_pointer(dev->ingress_queue, queue);
#endif
        return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops)
{
        if (dev->ethtool_ops == &default_ethtool_ops)
                dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
        char *addr = (char *)dev - dev->padded;

        kvfree(addr);
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                unsigned char name_assign_type,
                void (*setup)(struct net_device *),
                unsigned int txqs, unsigned int rxqs)
{
        struct net_device *dev;
        unsigned int alloc_size;
        struct net_device *p;

        BUG_ON(strlen(name) >= sizeof(dev->name));

        if (txqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
                return NULL;
        }

        if (rxqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
                return NULL;
        }

        alloc_size = sizeof(struct net_device);
        if (sizeof_priv) {
                /* ensure 32-byte alignment of private area */
                alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
                alloc_size += sizeof_priv;
        }
        /* ensure 32-byte alignment of whole construct */
        alloc_size += NETDEV_ALIGN - 1;

        p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!p)
                return NULL;

        dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;

        dev->pcpu_refcnt = alloc_percpu(int);
        if (!dev->pcpu_refcnt)
                goto free_dev;

        if (dev_addr_init(dev))
                goto free_pcpu;

        dev_mc_init(dev);
        dev_uc_init(dev);

        dev_net_set(dev, &init_net);

        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;

        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
        INIT_LIST_HEAD(&dev->close_list);
        INIT_LIST_HEAD(&dev->link_watch_list);
        INIT_LIST_HEAD(&dev->adj_list.upper);
        INIT_LIST_HEAD(&dev->adj_list.lower);
        INIT_LIST_HEAD(&dev->ptype_all);
        INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
        hash_init(dev->qdisc_hash);
#endif
        dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);

        if (!dev->tx_queue_len) {
                dev->priv_flags |= IFF_NO_QUEUE;
                dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
        }

        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
        if (netif_alloc_netdev_queues(dev))
                goto free_all;

        dev->num_rx_queues = rxqs;
        dev->real_num_rx_queues = rxqs;
        if (netif_alloc_rx_queues(dev))
                goto free_all;

        strcpy(dev->name, name);
        dev->name_assign_type = name_assign_type;
        dev->group = INIT_NETDEV_GROUP;
        if (!dev->ethtool_ops)
                dev->ethtool_ops = &default_ethtool_ops;

        nf_hook_ingress_init(dev);

        return dev;

free_all:
        free_netdev(dev);
        return NULL;

free_pcpu:
        free_percpu(dev->pcpu_refcnt);
free_dev:
        netdev_freemem(dev);
        return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);

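/* Example (illustrative sketch added in this edit, not original code): most
 * drivers use a wrapper such as alloc_etherdev_mqs(), but a direct call looks
 * like the following. "foo_priv" and "foo_setup" are hypothetical driver
 * symbols and the queue counts are arbitrary.
 *
 *      dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *                             NET_NAME_UNKNOWN, foo_setup, 8, 8);
 *      if (!dev)
 *              return -ENOMEM;
 */
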
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
        struct napi_struct *p, *n;

        might_sleep();
        netif_free_tx_queues(dev);
        netif_free_rx_queues(dev);

        kfree(rcu_dereference_protected(dev->ingress_queue, 1));

        /* Flush device addresses */
        dev_addr_flush(dev);

        list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
                netif_napi_del(p);

        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;

        /* Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                netdev_freemem(dev);
                return;
        }

        BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
        dev->reg_state = NETREG_RELEASED;

        /* will free via device release */
        put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net -  Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
        might_sleep();
        if (rtnl_is_locked())
                synchronize_rcu_expedited();
        else
                synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore.  You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
        ASSERT_RTNL();

        if (head) {
                list_move_tail(&dev->unreg_list, head);
        } else {
                rollback_registered(dev);
                /* Finish processing unregister after unlock */
                net_set_todo(dev);
        }
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
        struct net_device *dev;

        if (!list_empty(head)) {
                rollback_registered_many(head);
                list_for_each_entry(dev, head, unreg_list)
                        net_set_todo(dev);
                list_del(head);
        }
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore.  In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
        rtnl_lock();
        unregister_netdevice(dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

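/* Example (illustrative sketch added in this edit, not original code): the
 * usual teardown mirror of register_netdev() in a driver's remove path.
 * free_netdev() must only be called once the unregister has completed:
 *
 *      unregister_netdev(dev);
 *      free_netdev(dev);
 */
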
/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
        int err, new_nsid, new_ifindex;

        ASSERT_RTNL();

        /* Don't allow namespace local devices to be moved. */
        err = -EINVAL;
        if (dev->features & NETIF_F_NETNS_LOCAL)
                goto out;

        /* Ensure the device has been registered */
        if (dev->reg_state != NETREG_REGISTERED)
                goto out;

        /* Get out if there is nothing to do */
        err = 0;
        if (net_eq(dev_net(dev), net))
                goto out;

        /* Pick the destination device name, and ensure
         * we can use it in the destination network namespace.
         */
        err = -EEXIST;
        if (__dev_get_by_name(net, dev->name)) {
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
                if (dev_get_valid_name(net, dev, pat) < 0)
                        goto out;
        }

        /*
         * And now a mini version of register_netdevice unregister_netdevice.
         */

        /* If device is running close it first. */
        dev_close(dev);

        /* And unlink it from device chain */
        err = -ENODEV;
        unlist_netdevice(dev);

        synchronize_net();

        /* Shutdown queueing discipline. */
        dev_shutdown(dev);

        /* Notify protocols, that we are about to destroy
         * this device. They should clean all the things.
         *
         * Note that dev->reg_state stays at NETREG_REGISTERED.
         * This is wanted because this way 8021q and macvlan know
         * the device is just moving and can keep their slaves up.
         */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();
        call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);

        new_nsid = peernet2id_alloc(dev_net(dev), net);
        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex))
                new_ifindex = dev_new_index(net);
        else
                new_ifindex = dev->ifindex;

        rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
                            new_ifindex);

        /*
         * Flush the unicast and multicast chains
         */
        dev_uc_flush(dev);
        dev_mc_flush(dev);

        /* Send a netdev-removed uevent to the old namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
        netdev_adjacent_del_links(dev);

        /* Actually switch the network namespace */
        dev_net_set(dev, net);
        dev->ifindex = new_ifindex;

        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
        netdev_adjacent_add_links(dev);

        /* Fixup kobjects */
        err = device_rename(&dev->dev, dev->name);
        WARN_ON(err);

        /* Add the device back in the hashes */
        list_netdevice(dev);

        /* Notify protocols, that a new device appeared. */
        call_netdevice_notifiers(NETDEV_REGISTER, dev);

        /*
         * Prevent userspace races by waiting until the network
         * device is fully setup before sending notifications.
         */
        rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

        synchronize_net();
        err = 0;
out:
        return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_dead(unsigned int oldcpu)
{
        struct sk_buff **list_skb;
        struct sk_buff *skb;
        unsigned int cpu;
        struct softnet_data *sd, *oldsd, *remsd = NULL;

        local_irq_disable();
        cpu = smp_processor_id();
        sd = &per_cpu(softnet_data, cpu);
        oldsd = &per_cpu(softnet_data, oldcpu);

        /* Find end of our completion_queue. */
        list_skb = &sd->completion_queue;
        while (*list_skb)
                list_skb = &(*list_skb)->next;
        /* Append completion queue from offline CPU. */
        *list_skb = oldsd->completion_queue;
        oldsd->completion_queue = NULL;

        /* Append output queue from offline CPU. */
        if (oldsd->output_queue) {
                *sd->output_queue_tailp = oldsd->output_queue;
                sd->output_queue_tailp = oldsd->output_queue_tailp;
                oldsd->output_queue = NULL;
                oldsd->output_queue_tailp = &oldsd->output_queue;
        }
        /* Append NAPI poll list from offline CPU, with one exception :
         * process_backlog() must be called by cpu owning percpu backlog.
         * We properly handle process_queue & input_pkt_queue later.
         */
        while (!list_empty(&oldsd->poll_list)) {
                struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
                                                            struct napi_struct,
                                                            poll_list);

                list_del_init(&napi->poll_list);
                if (napi->poll == process_backlog)
                        napi->state = 0;
                else
                        ____napi_schedule(sd, napi);
        }

        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();

#ifdef CONFIG_RPS
        remsd = oldsd->rps_ipi_list;
        oldsd->rps_ipi_list = NULL;
#endif
        /* send out pending IPI's on offline CPU */
        net_rps_send_ipi(remsd);

        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }
        while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }

        return 0;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask)
{
        if (mask & NETIF_F_HW_CSUM)
                mask |= NETIF_F_CSUM_MASK;
        mask |= NETIF_F_VLAN_CHALLENGED;

        all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;

        /* If one device supports hw checksumming, set for all. */
        if (all & NETIF_F_HW_CSUM)
                all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

        return all;
}
EXPORT_SYMBOL(netdev_increment_features);

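/* Example (illustrative sketch added in this edit, not original code): a
 * master driver such as bonding or team recomputes its feature set by
 * folding in each lower device. "priv", "slave" and "mask" are hypothetical
 * driver symbols and the starting value is simplified.
 *
 *      netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *
 *      list_for_each_entry(slave, &priv->slaves, list)
 *              features = netdev_increment_features(features,
 *                                                   slave->dev->features,
 *                                                   mask);
 */
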
static struct hlist_head * __net_init netdev_create_hash(void)
{
        int i;
        struct hlist_head *hash;

        hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
        if (hash != NULL)
                for (i = 0; i < NETDEV_HASHENTRIES; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
        if (net != &init_net)
                INIT_LIST_HEAD(&net->dev_base_head);

        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
                goto err_name;

        net->dev_index_head = netdev_create_hash();
        if (net->dev_index_head == NULL)
                goto err_idx;

        return 0;

err_idx:
        kfree(net->dev_name_head);
err_name:
        return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
        const struct device_driver *driver;
        const struct device *parent;
        const char *empty = "";

        parent = dev->dev.parent;
        if (!parent)
                return empty;

        driver = parent->driver;
        if (driver && driver->name)
                return driver->name;
        return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
                            struct va_format *vaf)
{
        if (dev && dev->dev.parent) {
                dev_printk_emit(level[1] - '0',
                                dev->dev.parent,
                                "%s %s %s%s: %pV",
                                dev_driver_string(dev->dev.parent),
                                dev_name(dev->dev.parent),
                                netdev_name(dev), netdev_reg_state(dev),
                                vaf);
        } else if (dev) {
                printk("%s%s%s: %pV",
                       level, netdev_name(dev), netdev_reg_state(dev), vaf);
        } else {
                printk("%s(NULL net_device): %pV", level, vaf);
        }
}

void netdev_printk(const char *level, const struct net_device *dev,
                   const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);

        vaf.fmt = format;
        vaf.va = &args;

        __netdev_printk(level, dev, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)                         \
void func(const struct net_device *dev, const char *fmt, ...)          \
{                                                                       \
        struct va_format vaf;                                           \
        va_list args;                                                   \
                                                                        \
        va_start(args, fmt);                                            \
                                                                        \
        vaf.fmt = fmt;                                                  \
        vaf.va = &args;                                                 \
                                                                        \
        __netdev_printk(level, dev, &vaf);                              \
                                                                        \
        va_end(args);                                                   \
}                                                                       \
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

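/* Example (illustrative sketch added in this edit, not original code): the
 * level-specific wrappers generated above are the preferred way for drivers
 * to log, since they prefix messages with driver, bus and interface name.
 * "tries" is a hypothetical local variable.
 *
 *      netdev_warn(dev, "link flap detected, resetting (try %d)\n", tries);
 */
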
static void __net_exit netdev_exit(struct net *net)
{
        kfree(net->dev_name_head);
        kfree(net->dev_index_head);
        if (net != &init_net)
                WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
        .init = netdev_init,
        .exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
        struct net_device *dev, *aux;
        /*
         * Push all migratable network devices back to the
         * initial network namespace
         */
        rtnl_lock();
        for_each_netdev_safe(net, dev, aux) {
                int err;
                char fb_name[IFNAMSIZ];

                /* Ignore unmoveable devices (i.e. loopback) */
                if (dev->features & NETIF_F_NETNS_LOCAL)
                        continue;

                /* Leave virtual devices for the generic cleanup */
                if (dev->rtnl_link_ops)
                        continue;

                /* Push remaining network devices to init_net */
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
                                 __func__, dev->name, err);
                        BUG();
                }
        }
        rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
        /* Return with the rtnl_lock held when there are no network
         * devices unregistering in any network namespace in net_list.
         */
        struct net *net;
        bool unregistering;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(&netdev_unregistering_wq, &wait);
        for (;;) {
                unregistering = false;
                rtnl_lock();
                list_for_each_entry(net, net_list, exit_list) {
                        if (net->dev_unreg_count > 0) {
                                unregistering = true;
                                break;
                        }
                }
                if (!unregistering)
                        break;
                __rtnl_unlock();

                wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
        /* At exit all network devices must be removed from a network
         * namespace.  Do this in the reverse order of registration.
         * Do this across as many network namespaces as possible to
         * improve batching efficiency.
         */
        struct net_device *dev;
        struct net *net;
        LIST_HEAD(dev_kill_list);

        /* To prevent network device cleanup code from dereferencing
         * loopback devices or network devices that have been freed
         * wait here for all pending unregistrations to complete,
         * before unregistering the loopback device and allowing the
         * network namespace to be freed.
         *
         * The netdev todo list containing all network devices
         * unregistrations that happen in default_device_exit_batch
         * will run in the rtnl_unlock() at the end of
         * default_device_exit_batch.
         */
        rtnl_lock_unregistering(net_list);
        list_for_each_entry(net, net_list, exit_list) {
                for_each_netdev_reverse(net, dev) {
                        if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
                                dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
                        else
                                unregister_netdevice_queue(dev, &dev_kill_list);
                }
        }
        unregister_netdevice_many(&dev_kill_list);
        rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
        .exit = default_device_exit,
        .exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
        int i, rc = -ENOMEM;

        BUG_ON(!dev_boot_phase);

        if (dev_proc_init())
                goto out;

        if (netdev_kobject_init())
                goto out;

        INIT_LIST_HEAD(&ptype_all);
        for (i = 0; i < PTYPE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&ptype_base[i]);

        INIT_LIST_HEAD(&offload_base);

        if (register_pernet_subsys(&netdev_net_ops))
                goto out;

        /*
         * Initialise the packet receive queues.
         */
        for_each_possible_cpu(i) {
                struct work_struct *flush = per_cpu_ptr(&flush_works, i);
                struct softnet_data *sd = &per_cpu(softnet_data, i);

                INIT_WORK(flush, flush_backlog);

                skb_queue_head_init(&sd->input_pkt_queue);
                skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
                skb_queue_head_init(&sd->xfrm_backlog);
#endif
                INIT_LIST_HEAD(&sd->poll_list);
                sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
                sd->csd.func = rps_trigger_softirq;
                sd->csd.info = sd;
                sd->cpu = i;
#endif

                sd->backlog.poll = process_backlog;
                sd->backlog.weight = weight_p;
        }

        dev_boot_phase = 0;

        /* The loopback device is special: if any other network device
         * is present in a network namespace, the loopback device must
         * be present too. Since we now dynamically allocate and free the
         * loopback device, ensure this invariant is maintained by
         * keeping the loopback device as the first device on the
         * list of network devices, so that it is the first device that
         * appears and the last network device that disappears.
         */
        if (register_pernet_device(&loopback_net_ops))
                goto out;

        if (register_pernet_device(&default_device_ops))
                goto out;

        open_softirq(NET_TX_SOFTIRQ, net_tx_action);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);

        rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
                                       NULL, dev_cpu_dead);
        WARN_ON(rc < 0);
        rc = 0;
out:
        return rc;
}

subsys_initcall(net_dev_init);