// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *				to 2 if register_netdev gets called
 *				before net_dev_init & also removed a
 *				few lines of code in the process.
 *	Alan Cox	:	device private ioctl copies fields back.
 *	Alan Cox	:	Transmit queue code does relevant
 *				stunts to keep the queue safe.
 *	Alan Cox	:	Fixed double lock.
 *	Alan Cox	:	Fixed promisc NULL pointer trap
 *	????????	:	Support the full private ioctl range
 *	Alan Cox	:	Moved ioctl permission check into
 *	Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *	Alan Cox	:	100 backlog just doesn't cut it when
 *				you start doing multicast video 8)
 *	Alan Cox	:	Rewrote net_bh and list manager.
 *	Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *	Alan Cox	:	Took out transmit every packet pass
 *				Saved a few bytes in the ioctl handler
 *	Alan Cox	:	Network driver sets packet type before
 *				calling netif_rx. Saves a function
 *	Alan Cox	:	Hashed net_bh()
 *	Richard Kooijman:	Timestamp fixes.
 *	Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *	Alan Cox	:	Device lock protection.
 *	Alan Cox	:	Fixed nasty side effect of device close
 *	Rudi Cilibrasi	:	Pass the right thing to
 *	Dave Miller	:	32bit quantity for the device lock to
 *				make it work out on a Sparc.
 *	Bjorn Ekwall	:	Added KERNELD hack.
 *	Alan Cox	:	Cleaned up the backlog initialise.
 *	Craig Metz	:	SIOCGIFCONF fix if space for under
 *	Thomas Bogendoerfer:	Return ENODEV for dev_open, if there
 *				is no device open function.
 *	Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *	Cyrus Durgin	:	Cleaned for KMOD
 *	Adam Sulmicki	:	Bug Fix : Network Device Unload
 *				A network device unload needs to purge
 *	Paul Rusty Russell:	SIOCSIFNAME
 *	Pekka Riikonen	:	Netdev boot-time settings code
 *	Andrew Morton	:	Make unregister_netdevice wait
 *				indefinitely on dev->refcnt
 *	J Hadi Salim	:	- Backlog queue sampling
 *				- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/netdev_queues.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <linux/phy_link_topology.h>

#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static inline void dev_base_seq_inc(struct net *net)
	unsigned int val = net->dev_base_seq + 1;

	WRITE_ONCE(net->dev_base_seq, val ?: 1);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];

#ifndef CONFIG_PREEMPT_RT

static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);

static int __init setup_backlog_napi_threads(char *arg)
	static_branch_enable(&use_backlog_threads_key);

early_param("thread_backlog_napi", setup_backlog_napi_threads);

static bool use_backlog_threads(void)
	return static_branch_unlikely(&use_backlog_threads_key);
static bool use_backlog_threads(void)

static inline void backlog_lock_irq_save(struct softnet_data *sd,
					 unsigned long *flags)
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_save(*flags);

static inline void backlog_lock_irq_disable(struct softnet_data *sd)
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irq(&sd->input_pkt_queue.lock);

static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
					      unsigned long *flags)
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_restore(*flags);

static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irq(&sd->input_pkt_queue.lock);
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	INIT_LIST_HEAD(&name_node->list);

static void netdev_name_node_free(struct netdev_name_node *name_node)

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));

static void netdev_name_node_del(struct netdev_name_node *name_node)
	hlist_del_rcu(&name_node->hlist);

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))

bool netdev_name_in_use(struct net *net, const char *name)
	return netdev_name_node_lookup(net, name);
EXPORT_SYMBOL(netdev_name_in_use);
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	name_node = netdev_name_node_alloc(dev, name);
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail_rcu(&name_node->list, &dev->name_node->list);

static void netdev_name_node_alt_free(struct rcu_head *head)
	struct netdev_name_node *name_node =
		container_of(head, struct netdev_name_node, rcu);

	kfree(name_node->name);
	netdev_name_node_free(name_node);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
	netdev_name_node_del(name_node);
	list_del(&name_node->list);
	call_rcu(&name_node->rcu, netdev_name_node_alt_free);

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)

	__netdev_name_node_alt_destroy(name_node);
static void netdev_name_node_alt_flush(struct net_device *dev)
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
		list_del(&name_node->list);
		netdev_name_node_alt_free(&name_node->rcu);
	}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_add(net, name_node);

	/* We reserved the ifindex, this can't fail */
	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));

	dev_base_seq_inc(net);

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	xa_erase(&net->dev_by_index, dev->ifindex);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_del(name_node);

	/* Unlink dev from the device chain */
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);

	dev_base_seq_inc(dev_net(dev));
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
	.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
};
EXPORT_PER_CPU_SYMBOL(softnet_data);

/* Page_pool has a lockless array/stack to alloc/recycle pages.
 * PP consumers must pay attention to run APIs in the appropriate context
 * (e.g. NAPI context).
 */
DEFINE_PER_CPU(struct page_pool *, system_page_pool);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if a protocol handler that mangles packets is
 * first on the list, it cannot sense that the packet is cloned and
 * should be copied-on-write, so it will change it and subsequent
 * readers will get a broken packet.
 */
static inline struct list_head *ptype_head(const struct packet_type *pt)
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;

	return pt->dev ? &pt->dev->ptype_specific :
			 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
EXPORT_SYMBOL(dev_add_pack);
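
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): how a
 * protocol module typically registers a receive handler via dev_add_pack().
 * The handler name, the example module init and the ETH_P_IP binding are
 * assumptions chosen only for this example.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* A real handler would parse the packet; here we just consume it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* or htons(ETH_P_ALL) for a tap */
	.func = example_rcv,
};

static int __init example_pack_init(void)
{
	dev_add_pack(&example_packet_type);
	return 0;
}
#endif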
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		list_del_rcu(&pt->list);
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);

	spin_unlock(&ptype_lock);
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
	__dev_remove_pack(pt);
EXPORT_SYMBOL(dev_remove_pack);


/*******************************************************************************
 *
 *			  Device Interface Subroutines
 *
 *******************************************************************************/
/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return READ_ONCE(dev->ifindex);
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)

	info = skb_tunnel_info_unclone(skb);

	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))

	return &stack->path[k];

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
	struct net_device_path *path;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		path = dev_fwd_path(stack);

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);

		if (WARN_ON_ONCE(last_dev == ctx.dev))
	}

	path = dev_fwd_path(stack);
	path->type = DEV_PATH_ETHERNET;
EXPORT_SYMBOL_GPL(dev_fill_forward_path);
/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)

/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *
netdev_napi_by_id(struct net *net, unsigned int napi_id)
	struct napi_struct *napi;

	napi = napi_by_id(napi_id);

	if (WARN_ON_ONCE(!napi->dev))

	if (!net_eq(net, dev_net(napi->dev)))
/**
 * netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
 * @net: the applicable net namespace
 * @napi_id: ID of a NAPI of a target device
 *
 * Find a NAPI instance with @napi_id. Lock its device.
 * The device must be in %NETREG_REGISTERED state for lookup to succeed.
 * netdev_unlock() must be called to release it.
 *
 * Return: pointer to NAPI, its device with lock held, NULL if not found.
 */
struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id)
	struct napi_struct *napi;
	struct net_device *dev;

	napi = netdev_napi_by_id(net, napi_id);
	if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) {

	dev = __netdev_put_lock(dev);

	napi = netdev_napi_by_id(net, napi_id);
	if (napi && napi->dev != dev)
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
EXPORT_SYMBOL(dev_get_by_name_rcu);

/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
	struct net_device *dev;

	dev = dev_get_by_name_rcu(net, name);
EXPORT_SYMBOL(dev_get_by_name);
/**
 * netdev_get_by_name() - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 * @tracker: tracking object for the acquired reference
 * @gfp: allocation flags for the tracker
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use netdev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp)
	struct net_device *dev;

	dev = dev_get_by_name(net, name);
	netdev_tracker_alloc(dev, tracker, gfp);
EXPORT_SYMBOL(netdev_get_by_name);
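
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): looking a
 * device up by name from process context with a reference tracker, then
 * releasing it.  The "eth0" name and the helper name are assumptions made
 * only for this example.
 */
#if 0
static void example_lookup_by_name(struct net *net)
{
	netdevice_tracker tracker;
	struct net_device *dev;

	dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
	if (!dev)
		return;

	netdev_info(dev, "found, mtu=%u\n", READ_ONCE(dev->mtu));

	/* Drop the tracked reference taken by netdev_get_by_name(). */
	netdev_put(dev, &tracker);
}
#endif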
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RTNL semaphore.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
EXPORT_SYMBOL(dev_get_by_index_rcu);
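
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): an
 * RCU-side lookup by ifindex.  No reference is taken, so the pointer may
 * only be dereferenced inside the RCU read-side critical section.  The
 * helper name and the printed field are assumptions for this example.
 */
#if 0
static void example_lookup_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		pr_info("ifindex %d is %s\n", ifindex, dev->name);
	rcu_read_unlock();
}
#endif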
/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
	struct net_device *dev;

	dev = dev_get_by_index_rcu(net, ifindex);
EXPORT_SYMBOL(dev_get_by_index);

/**
 * netdev_get_by_index() - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 * @tracker: tracking object for the acquired reference
 * @gfp: allocation flags for the tracker
 *
 * Search for an interface by index. Returns NULL if the device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * netdev_put() to indicate they have finished with it.
 */
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp)
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);
	netdev_tracker_alloc(dev, tracker, gfp);
EXPORT_SYMBOL(netdev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_napi_id(unsigned int napi_id)
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;

/* Release the held reference on the net_device, and if the net_device
 * is still registered try to lock the instance lock. If device is being
 * unregistered NULL will be returned (but the reference has been released).
 *
 * This helper is intended for locking net_device after it has been looked up
 * using a lockless lookup helper. Lock prevents the instance from going away.
 */
struct net_device *__netdev_put_lock(struct net_device *dev)
	if (dev->reg_state > NETREG_REGISTERED) {
/**
 * netdev_get_by_index_lock() - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. If a valid device
 * with @ifindex is found it will be returned with netdev->lock held.
 * netdev_unlock() must be called to release it.
 *
 * Return: pointer to a device with lock held, NULL if not found.
 */
struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex)
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);

	return __netdev_put_lock(dev);

struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
		    unsigned long *index)
	dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT);

	dev = __netdev_put_lock(dev);

static DEFINE_SEQLOCK(netdev_rename_lock);
void netdev_copy_name(struct net_device *dev, char *name)
	do {
		seq = read_seqbegin(&netdev_rename_lock);
		strscpy(name, dev->name, IFNAMSIZ);
	} while (read_seqretry(&netdev_rename_lock, seq));

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
	struct net_device *dev;

	dev = dev_get_by_index_rcu(net, ifindex);

	netdev_copy_name(dev, name);
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
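
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): checking
 * whether an Ethernet device with a given hardware address exists, under RCU.
 * The helper name is an assumption for this example and no reference is
 * taken on the device.
 */
#if 0
static bool example_mac_is_present(struct net *net, const char *mac)
{
	bool found;

	rcu_read_lock();
	found = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac) != NULL;
	rcu_read_unlock();

	return found;
}
#endif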
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
	struct net_device *dev, *ret = NULL;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
	struct net_device *dev, *ret;

	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)

	if (!strcmp(name, ".") || !strcmp(name, ".."))

	if (*name == '/' || *name == ':' || isspace(*name))
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@res: result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *res)
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	/* Verify the string as this thing may have come from the user.
	 * There must be one "%d" and no other "%" characters.
	 */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))

	/* Use one page as a bit array of possible slots */
	inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);

	for_each_netdev(net, d) {
		struct netdev_name_node *name_node;

		netdev_for_each_altname(d, name_node) {
			if (!sscanf(name_node->name, name, &i))
			if (i < 0 || i >= max_netdevices)

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, name_node->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}

		if (!sscanf(d->name, name, &i))
		if (i < 0 || i >= max_netdevices)

		/* avoid cases where sscanf is not exact inverse of printf */
		snprintf(buf, IFNAMSIZ, name, i);
		if (!strncmp(buf, d->name, IFNAMSIZ))
			__set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, max_netdevices);
	if (i == max_netdevices)

	/* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
	strscpy(buf, name, IFNAMSIZ);
	snprintf(res, IFNAMSIZ, buf, i);
/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
			       const char *want_name, char *out_name,
			       int dup_errno)
	if (!dev_valid_name(want_name))

	if (strchr(want_name, '%'))
		return __dev_alloc_name(net, want_name, out_name);

	if (netdev_name_in_use(net, want_name))

	if (out_name != want_name)
		strscpy(out_name, want_name, IFNAMSIZ);
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
	return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
EXPORT_SYMBOL(dev_alloc_name);
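
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): how a
 * driver typically requests a templated name before registration.  The
 * "example%d" format string and the helper name are assumptions; real
 * drivers use templates such as "eth%d" or "wlan%d".
 */
#if 0
static int example_name_device(struct net_device *dev)
{
	int unit;

	/* Fills dev->name with e.g. "example0" and returns the unit number. */
	unit = dev_alloc_name(dev, "example%d");
	if (unit < 0)
		return unit;	/* -EINVAL or -ENFILE */

	return 0;
}
#endif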
static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
	ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
	return ret < 0 ? ret : 0;
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 */
int dev_change_name(struct net_device *dev, const char *newname)
	struct net *net = dev_net(dev);
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];

	ASSERT_RTNL_NET(net);

	if (!strncmp(newname, dev->name, IFNAMSIZ))

	memcpy(oldname, dev->name, IFNAMSIZ);

	write_seqlock_bh(&netdev_rename_lock);
	err = dev_get_valid_name(net, dev, newname);
	write_sequnlock_bh(&netdev_rename_lock);

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s%s\n", oldname,
			    dev->flags & IFF_UP ? " (while UP)" : "");

	old_assign_type = dev->name_assign_type;
	WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);

	ret = device_rename(&dev->dev, dev->name);
		write_seqlock_bh(&netdev_rename_lock);
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_sequnlock_bh(&netdev_rename_lock);
		WRITE_ONCE(dev->name_assign_type, old_assign_type);

	netdev_adjacent_rename_links(dev, oldname);

	netdev_name_node_del(dev->name_node);

	netdev_name_node_add(net, dev->name_node);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

		/* err >= 0 after dev_alloc_name() or stores the first errno */
			write_seqlock_bh(&netdev_rename_lock);
			memcpy(dev->name, oldname, IFNAMSIZ);
			write_sequnlock_bh(&netdev_rename_lock);
			memcpy(oldname, newname, IFNAMSIZ);
			WRITE_ONCE(dev->name_assign_type, old_assign_type);
			old_assign_type = NET_NAME_RENAMED;
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)

	new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);

	memcpy(new_alias->ifalias, alias, len);
	new_alias->ifalias[len] = 0;

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	kfree_rcu(new_alias, rcuhead);
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
	const struct dev_ifalias *alias;

	alias = rcu_dereference(dev->ifalias);
	ret = snprintf(name, len, "%s", alias->ifalias);
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {

		call_netdevice_notifiers_info(NETDEV_CHANGE,
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
	}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
	__netdev_notify_peers(dev);
EXPORT_SYMBOL(netdev_notify_peers);
static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
	}
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
	const struct net_device_ops *ops = dev->netdev_ops;

	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		netif_set_up(dev, true);
		dev_set_rx_mode(dev);

		add_device_randomness(dev->dev_addr, dev->addr_len);
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
	if (dev->flags & IFF_UP)

	ret = __dev_open(dev, extack);

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
	call_netdevice_notifiers(NETDEV_UP, dev);
EXPORT_SYMBOL(dev_open);
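
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): bringing
 * an interface up from a context that does not already hold RTNL.  The
 * helper name and the NULL extack are assumptions made for this example.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev, NULL);	/* no extended ack for this caller */
	rtnl_unlock();

	return err;
}
#endif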
static void __dev_close_many(struct list_head *head)
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */

		netif_set_up(dev, false);
		netpoll_poll_enable(dev);
	}

static void __dev_close(struct net_device *dev)
	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
void dev_close_many(struct list_head *head, bool unlink)
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);

		list_del_init(&dev->close_list);
	}
EXPORT_SYMBOL(dev_close_many);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
	if (dev->flags & IFF_UP) {
		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
	}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
EXPORT_SYMBOL(dev_disable_lro);
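
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): disabling
 * LRO before a device starts forwarding packets to other interfaces, as the
 * comment above requires.  Must run under RTNL; the helper name is an
 * assumption for this example.
 */
#if 0
static void example_prepare_for_forwarding(struct net_device *dev)
{
	ASSERT_RTNL();

	/* LRO-merged frames must not be forwarded to another interface. */
	dev_disable_lro(dev);
}
#endif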
/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	return "UNKNOWN_NETDEV_EVENT";
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
	struct netdev_notifier_info info = {

	return nb->notifier_call(nb, val, &info);

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);

	if (!(dev->flags & IFF_UP))

	call_netdevice_notifier(nb, NETDEV_UP, dev);

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
	struct net_device *dev;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
	}

	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						     struct net *net)
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);

static int dev_boot_phase = 1;
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier to give it a race-free view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);

	/* When RTNL is removed, we need protection for netdev_chain. */

	err = raw_notifier_chain_register(&netdev_chain, nb);

		__rtnl_net_lock(net);
		err = call_netdevice_register_net_notifiers(nb, net);
		__rtnl_net_unlock(net);

	up_write(&pernet_ops_rwsem);

	for_each_net_continue_reverse(net) {
		__rtnl_net_lock(net);
		call_netdevice_unregister_net_notifiers(nb, net);
		__rtnl_net_unlock(net);
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
EXPORT_SYMBOL(register_netdevice_notifier);
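
/*
 * Illustrative sketch (not part of dev.c, compiled out on purpose): a minimal
 * notifier block reacting to NETDEV_UP/NETDEV_DOWN events.  The callback,
 * block and init names are assumptions made for this example.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "is up\n");
		break;
	case NETDEV_DOWN:
		netdev_info(dev, "is down\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

static int __init example_notifier_init(void)
{
	return register_netdevice_notifier(&example_netdev_notifier);
}
#endif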
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);

	err = raw_notifier_chain_unregister(&netdev_chain, nb);

		__rtnl_net_lock(net);
		call_netdevice_unregister_net_notifiers(nb, net);
		__rtnl_net_unlock(net);

	up_write(&pernet_ops_rwsem);
EXPORT_SYMBOL(unregister_netdevice_notifier);
static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
	err = raw_notifier_chain_register(&net->netdev_chain, nb);

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);

	call_netdevice_unregister_net_notifiers(nb, net);
/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier to give it a race-free view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_net_unlock(net);
EXPORT_SYMBOL(register_netdevice_notifier_net);

/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_net_unlock(net);
EXPORT_SYMBOL(unregister_netdevice_notifier_net);
static void __move_netdevice_notifier_net(struct net *src_net,
					  struct net *dst_net,
					  struct notifier_block *nb)
	__unregister_netdevice_notifier_net(src_net, nb);
	__register_netdevice_notifier_net(dst_net, nb, true);

int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
	struct net *net = dev_net(dev);

	err = __register_netdevice_notifier_net(net, nb, false);
	list_add(&nn->list, &dev->net_notifier_list);

	rtnl_net_unlock(net);
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
	struct net *net = dev_net(dev);

	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_net_unlock(net);
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list)
		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info)
	struct net *net = dev_net(info->dev);

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
	 */
	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
	if (ret & NOTIFY_STOP_MASK)

	return raw_notifier_call_chain(&netdev_chain, val, info);

/**
 *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
 *	                                       and roll back on error
 *	@val_up: value passed unmodified to notifier function
 *	@val_down: value passed unmodified to the notifier function when
 *	           recovering from an error on @val_up
 *	@info: notifier information data
 *
 *	Call all per-netns network notifier blocks, but not notifier blocks on
 *	the global notifier chain. Parameters and return value are as for
 *	raw_notifier_call_chain_robust().
 */

static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
				     unsigned long val_down,
				     struct netdev_notifier_info *info)
	struct net *net = dev_net(info->dev);

	return raw_notifier_call_chain_robust(&net->netdev_chain,
					      val_up, val_down, info);

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
	struct netdev_notifier_info info = {

	return call_netdevice_notifiers_info(val, &info);
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
	return call_netdevice_notifiers_extack(val, dev, NULL);
EXPORT_SYMBOL(call_netdevice_notifiers);

/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
	struct netdev_notifier_info_ext info = {

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
EXPORT_SYMBOL(tcf_sw_enabled_key);
#endif

DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted = atomic_read(&netstamp_wanted);

	while (wanted > 0) {
		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted = atomic_read(&netstamp_wanted);

	while (wanted > 1) {
		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
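
/* Illustrative sketch (not part of the original file): users of software
 * packet timestamps keep the static key balanced, e.g. from a socket option
 * path:
 *
 *	net_enable_timestamp();		// when timestamping is first requested
 *	...
 *	net_disable_timestamp();	// when the last user goes away
 *
 * The CONFIG_JUMP_LABEL deferral above exists so these can be called from
 * atomic context; the static branch itself is only flipped from the
 * workqueue.
 */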
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	skb->tstamp_type = SKB_CLOCK_REALTIME;
	if (static_branch_unlikely(&netstamp_needed_key))
		skb->tstamp = ktime_get_real();
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			(SKB)->tstamp = ktime_get_real();	\
	}
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	return __is_skb_forwardable(dev, skb, true);
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
			      bool check_mtu)
{
	int ret = ____dev_forward_skb(dev, skb, check_mtu);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb2(dev, skb, true);
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
}
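
/* Illustrative sketch (not part of the original file): a veth-like driver
 * can loop a frame into its peer from .ndo_start_xmit.  "peer" is a
 * hypothetical net_device pointer owned by the driver.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;	// skb already freed
 *		return NETDEV_TX_OK;
 *	}
 */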
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&net_hotdata.ptype_all) ||
	       !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct list_head *ptype_list = &net_hotdata.ptype_all;
	struct packet_type *ptype, *pt_prev = NULL;
	struct sk_buff *skb2 = NULL;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (READ_ONCE(ptype->ignore_outgoing))
			continue;

		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &net_hotdata.ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				    i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		/* walk through the TCs and see if it falls into any of them */
		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		/* didn't find it, just return -1 to indicate no match */
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
#ifdef CONFIG_XPS
static struct static_key xps_needed __read_mostly;
static struct static_key xps_rxqs_needed __read_mostly;
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     struct xps_dev_maps *old_maps, int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		if (old_maps)
			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev_maps->num_tc;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, NULL, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}
static void reset_xps_maps(struct net_device *dev,
			   struct xps_dev_maps *dev_maps,
			   enum xps_map_type type)
{
	static_key_slow_dec_cpuslocked(&xps_needed);
	if (type == XPS_RXQS)
		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);

	RCU_INIT_POINTER(dev->xps_maps[type], NULL);

	kfree_rcu(dev_maps, rcu);
}
static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
			   u16 offset, u16 count)
{
	struct xps_dev_maps *dev_maps;
	bool active = false;
	int i, j;

	dev_maps = xmap_dereference(dev->xps_maps[type]);
	if (!dev_maps)
		return;

	for (j = 0; j < dev_maps->nr_ids; j++)
		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
	if (!active)
		reset_xps_maps(dev, dev_maps, type);

	if (type == XPS_CPUS) {
		for (i = offset + (count - 1); count--; i--)
			netdev_queue_numa_node_write(
				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
	}
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	if (!static_key_false(&xps_needed))
		return;

	cpus_read_lock();
	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed))
		clean_xps_maps(dev, XPS_RXQS, offset, count);

	clean_xps_maps(dev, XPS_CPUS, offset, count);

	mutex_unlock(&xps_map_mutex);
	cpus_read_unlock();
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
	 *  map
	 */
	if (is_rxqs_map)
		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
	else
		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
				       cpu_to_node(attr_index));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
/* Copy xps maps at a given index */
static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
			      struct xps_dev_maps *new_dev_maps, int index,
			      int tc, bool skip_tc)
{
	int i, tci = index * dev_maps->num_tc;
	struct xps_map *map;

	/* copy maps belonging to foreign traffic classes */
	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
		if (i == tc && skip_tc)
			continue;

		/* fill in the new device map from the old device map */
		map = xmap_dereference(dev_maps->attr_map[tci]);
		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}
}
/* Must be called under cpus_read_lock */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, enum xps_map_type type)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
	const unsigned long *online_mask = NULL;
	bool active = false, copy = false;
	int i, j, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	unsigned int nr_ids;

	WARN_ON_ONCE(index >= dev->num_tx_queues);

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps[type]);
	if (type == XPS_RXQS) {
		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
		nr_ids = dev->num_rx_queues;
	} else {
		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
		if (num_possible_cpus() > 1)
			online_mask = cpumask_bits(cpu_online_mask);
		nr_ids = nr_cpu_ids;
	}

	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	/* The old dev_maps could be larger or smaller than the one we're
	 * setting up now, as dev->num_tc or nr_ids could have been updated in
	 * between. We could try to be smart, but let's be safe instead and only
	 * copy foreign traffic classes if the two map sizes match.
	 */
	if (dev_maps &&
	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
		copy = true;

	/* allocate memory for queue storage */
	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
	     j < nr_ids;) {
		if (!new_dev_maps) {
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
			if (!new_dev_maps) {
				mutex_unlock(&xps_map_mutex);
				return -ENOMEM;
			}

			new_dev_maps->nr_ids = nr_ids;
			new_dev_maps->num_tc = num_tc;
		}

		tci = j * num_tc + tc;
		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;

		map = expand_xps_map(map, j, index, type == XPS_RXQS);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	if (!dev_maps) {
		/* Increment static keys at most once per type */
		static_key_slow_inc_cpuslocked(&xps_needed);
		if (type == XPS_RXQS)
			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
	}

	for (j = 0; j < nr_ids; j++) {
		bool skip_tc = false;

		tci = j * num_tc + tc;
		if (netif_attr_test_mask(j, mask, nr_ids) &&
		    netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
			int pos = 0;

			skip_tc = true;

			map = xmap_dereference(new_dev_maps->attr_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (type == XPS_CPUS) {
				if (numa_node_id == -2)
					numa_node_id = cpu_to_node(j);
				else if (numa_node_id != cpu_to_node(j))
					numa_node_id = -1;
			}
#endif
		}

		if (copy)
			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
					  skip_tc);
	}

	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for (j = 0; j < dev_maps->nr_ids; j++) {
		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
			map = xmap_dereference(dev_maps->attr_map[tci]);
			if (!map)
				continue;

			if (copy) {
				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
				if (map == new_map)
					continue;
			}

			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
			kfree_rcu(map, rcu);
		}
	}

	old_dev_maps = dev_maps;

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	if (type == XPS_CPUS)
		/* update Tx queue numa node */
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
					     (numa_node_id >= 0) ?
					     numa_node_id : NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes tx-queue from unused CPUs/rx-queues */
	for (j = 0; j < dev_maps->nr_ids; j++) {
		tci = j * dev_maps->num_tc;

		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
			if (i == tc &&
			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
				continue;

			active |= remove_xps_queue(dev_maps,
						   copy ? old_dev_maps : NULL,
						   tci, index);
		}
	}

	if (old_dev_maps)
		kfree_rcu(old_dev_maps, rcu);

	/* free map if not active */
	if (!active)
		reset_xps_maps(dev, dev_maps, type);

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for (j = 0; j < nr_ids; j++) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = copy ?
			      xmap_dereference(dev_maps->attr_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	int ret;

	cpus_read_lock();
	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif	/* CONFIG_XPS */
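
/* Illustrative sketch (not part of the original file): a driver pinning TX
 * queue "qid" to the CPUs of NUMA node "node" could do something like:
 *
 *	cpumask_var_t cpus;
 *
 *	if (zalloc_cpumask_var(&cpus, GFP_KERNEL)) {
 *		cpumask_copy(cpus, cpumask_of_node(node));
 *		netif_set_xps_queue(dev, cpus, qid);
 *		free_cpumask_var(cpus);
 *	}
 *
 * The same mapping can also be set from user space via
 * /sys/class/net/<dev>/queues/tx-<qid>/xps_cpus.
 */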
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	/* Unbind any subordinate channels */
	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev)
			netdev_unbind_sb_channel(dev, txq->sb_dev);
	}
}

void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
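
/* Illustrative sketch (not part of the original file): a driver exposing
 * two traffic classes over eight TX queues would typically combine the two
 * helpers above:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC1: queues 4-7
 */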
void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(sb_dev, 0);
#endif
	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));

	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev == sb_dev)
			txq->sb_dev = NULL;
	}
}
EXPORT_SYMBOL(netdev_unbind_sb_channel);
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset)
{
	/* Make certain the sb_dev and dev are already configured */
	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
		return -EINVAL;

	/* We cannot hand out queues we don't have */
	if ((offset + count) > dev->real_num_tx_queues)
		return -EINVAL;

	/* Record the mapping */
	sb_dev->tc_to_txq[tc].count = count;
	sb_dev->tc_to_txq[tc].offset = offset;

	/* Provide a way for Tx queue to find the tc_to_txq map or
	 * XPS map for itself.
	 */
	while (count--)
		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;

	return 0;
}
EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
	/* Do not use a multiqueue device to represent a subordinate channel */
	if (netif_is_multiqueue(dev))
		return -ENODEV;

	/* We allow channels 1 - 32767 to be used for subordinate channels.
	 * Channel 0 is meant to be "native" mode and used only to represent
	 * the main root device. We allow writing 0 to reset the device back
	 * to normal mode after being used as a subordinate channel.
	 */
	if (channel > S16_MAX)
		return -EINVAL;

	dev->num_tc = -channel;

	return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		net_shaper_set_real_num_tx_queues(dev, txq);

		dev_qdisc_change_real_num_tx(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 *	netif_set_real_num_queues - set actual number of RX and TX queues used
 *	@dev: Network device
 *	@txq: Actual number of TX queues
 *	@rxq: Actual number of RX queues
 *
 *	Set the real number of both TX and RX queues.
 *	Does nothing if the number of queues is already correct.
 */
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq)
{
	unsigned int old_rxq = dev->real_num_rx_queues;
	int err;

	if (txq < 1 || txq > dev->num_tx_queues ||
	    rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	/* Start from increases, so the error path only does decreases -
	 * decreases can't fail.
	 */
	if (rxq > dev->real_num_rx_queues) {
		err = netif_set_real_num_rx_queues(dev, rxq);
		if (err)
			return err;
	}
	if (txq > dev->real_num_tx_queues) {
		err = netif_set_real_num_tx_queues(dev, txq);
		if (err)
			goto undo_rx;
	}
	if (rxq < dev->real_num_rx_queues)
		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
	if (txq < dev->real_num_tx_queues)
		WARN_ON(netif_set_real_num_tx_queues(dev, txq));

	return 0;
undo_rx:
	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
	return err;
}
EXPORT_SYMBOL(netif_set_real_num_queues);
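
/* Illustrative sketch (not part of the original file): after negotiating
 * channel counts with firmware, a driver can shrink or grow both directions
 * in one call; "new_tx" and "new_rx" are hypothetical values it computed.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_queues(dev, new_tx, new_rx);
 *	rtnl_unlock();
 */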
/**
 * netif_set_tso_max_size() - set the max size of TSO frames supported
 * @dev:	netdev to update
 * @size:	max skb->len of a TSO frame
 *
 * Set the limit on the size of TSO super-frames the device can handle.
 * Unless explicitly set the stack will assume the value of
 * %GSO_LEGACY_MAX_SIZE.
 */
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{
	dev->tso_max_size = min(GSO_MAX_SIZE, size);
	if (size < READ_ONCE(dev->gso_max_size))
		netif_set_gso_max_size(dev, size);
	if (size < READ_ONCE(dev->gso_ipv4_max_size))
		netif_set_gso_ipv4_max_size(dev, size);
}
EXPORT_SYMBOL(netif_set_tso_max_size);
/**
 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
 * @dev:	netdev to update
 * @segs:	max number of TCP segments
 *
 * Set the limit on the number of TCP segments the device can generate from
 * a single TSO super-frame.
 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
 */
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
{
	dev->tso_max_segs = segs;
	if (segs < READ_ONCE(dev->gso_max_segs))
		netif_set_gso_max_segs(dev, segs);
}
EXPORT_SYMBOL(netif_set_tso_max_segs);
/**
 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
 * @to:		netdev to update
 * @from:	netdev from which to copy the limits
 */
void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
{
	netif_set_tso_max_size(to, from->tso_max_size);
	netif_set_tso_max_segs(to, from->tso_max_segs);
}
EXPORT_SYMBOL(netif_inherit_tso_max);
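
/* Illustrative sketch (not part of the original file): stacked devices
 * (bonding, team, vlan, ...) typically propagate the lower device's limits
 * when enslaving or linking:
 *
 *	netif_inherit_tso_max(upper_dev, lower_dev);
 */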
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * Default value is the number of physical cores if there are only 1 or 2, or
 * divided by 2 if there are more.
 */
int netif_get_num_default_rss_queues(void)
{
	cpumask_var_t cpus;
	int cpu, count = 0;

	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
		return 1;

	cpumask_copy(cpus, cpu_online_mask);
	for_each_cpu(cpu, cpus) {
		++count;
		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
	}
	free_cpumask_var(cpus);

	return count > 2 ? DIV_ROUND_UP(count, 2) : count;
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_drop_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!netif_xmit_stopped(txq)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dev_kfree_skb_irq_reason);

void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (in_hardirq() || irqs_disabled())
		dev_kfree_skb_irq_reason(skb, reason);
	else
		kfree_skb_reason(skb, reason);
}
EXPORT_SYMBOL(dev_kfree_skb_any_reason);
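
/* Illustrative sketch (not part of the original file): a TX completion
 * handler that may run in hardirq or process context frees buffers with the
 * _any variant:
 *
 *	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
 *
 * which takes the deferred (softirq) path above when IRQs are disabled.
 */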
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queue
 * count to be used as a distribution range.
 */
static u16 skb_tx_hash(const struct net_device *dev,
		       const struct net_device *sb_dev,
		       struct sk_buff *skb)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = dev->real_num_tx_queues;

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = sb_dev->tc_to_txq[tc].offset;
		qcount = sb_dev->tc_to_txq[tc].count;
		if (unlikely(!qcount)) {
			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
					     sb_dev->name, qoffset, tc);
			qoffset = 0;
			qcount = dev->real_num_tx_queues;
		}
	}

	if (skb_rx_queue_recorded(skb)) {
		DEBUG_NET_WARN_ON_ONCE(qcount == 0);
		hash = skb_get_rx_queue(skb);
		if (hash >= qoffset)
			hash -= qoffset;
		while (unlikely(hash >= qcount))
			hash -= qcount;
		return hash + qoffset;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	skb_dump(KERN_WARNING, skb, false);
	WARN(1, "%s: caps=(%pNF, %pNF)\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_is_gso(skb))) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	if (!skb_frags_readable(skb))
		return -EFAULT;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	ret = -EINVAL;
	if (unlikely(offset >= skb_headlen(skb))) {
		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
		WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
			  offset, skb_headlen(skb));
		goto out;
	}
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
		WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
			  offset + sizeof(__sum16), skb_headlen(skb));
		goto out;
	}
	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
	if (ret)
		goto out;

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}

	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
	if (ret)
		goto out;

	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb_reset_csum_not_inet(skb);
out:
	return ret;
}
EXPORT_SYMBOL(skb_crc32c_csum_help);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return vlan_get_protocol_and_depth(skb, type, depth);
}
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
	netdev_err(dev, "hw csum failure\n");
	skb_dump(KERN_ERR, skb, true);
	dump_stack();
}

void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = skb_frag_page(frag);

			if (page && PageHighMem(page))
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif
static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	__be16 type;

	type = skb_network_protocol(skb, NULL);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);
static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > READ_ONCE(dev->gso_max_segs))
		return features & ~NETIF_F_GSO_MASK;

	if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
		return features & ~NETIF_F_GSO_MASK;

	if (!skb_shinfo(skb)->gso_type) {
		skb_warn_bad_offload(skb);
		return features & ~NETIF_F_GSO_MASK;
	}

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (dev_nit_active(dev))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb_mark_not_on_list(skb);
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_tx_queue_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
*validate_xmit_vlan(struct sk_buff
*skb
,
3770 netdev_features_t features
)
3772 if (skb_vlan_tag_present(skb
) &&
3773 !vlan_hw_offload_capable(features
, skb
->vlan_proto
))
3774 skb
= __vlan_hwaccel_push_inside(skb
);
int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb_csum_is_sctp(skb)))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	if (features & NETIF_F_HW_CSUM)
		return 0;

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
		    skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
		    !ipv6_has_hopopt_jumbo(skb))
			goto sw_checksum;

		switch (skb->csum_offset) {
		case offsetof(struct tcphdr, check):
		case offsetof(struct udphdr, check):
			return 0;
		}
	}

sw_checksum:
	return skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	dev_core_stats_tx_dropped_inc(dev);
	return NULL;
}
*validate_xmit_skb_list(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3863 struct sk_buff
*next
, *head
= NULL
, *tail
;
3865 for (; skb
!= NULL
; skb
= next
) {
3867 skb_mark_not_on_list(skb
);
3869 /* in case skb won't be segmented, point to itself */
3872 skb
= validate_xmit_skb(skb
, dev
, again
);
3880 /* If skb was segmented, skb->prev points to
3881 * the last segment. If not, it still contains skb.
3887 EXPORT_SYMBOL_GPL(validate_xmit_skb_list
);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
		u16 gso_segs = shinfo->gso_segs;
		unsigned int hdr_len;

		/* mac layer + network layer */
		hdr_len = skb_transport_offset(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, hdr_len,
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, hdr_len,
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
			int payload = skb->len - hdr_len;

			/* Malicious packet. */
			if (payload <= 0)
				return;
			gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
		}
		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
			     struct sk_buff **to_free,
			     struct netdev_queue *txq)
{
	int rc;

	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
	if (rc == NET_XMIT_SUCCESS)
		trace_qdisc_enqueue(q, txq, skb);
	return rc;
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP);

	if (q->flags & TCQ_F_NOLOCK) {
		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
		    qdisc_run_begin(q)) {
			/* Retest nolock_qdisc_is_empty() within the protection
			 * of q->seqlock to protect from racing with requeuing.
			 */
			if (unlikely(!nolock_qdisc_is_empty(q))) {
				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
				__qdisc_run(q);
				qdisc_run_end(q);

				goto no_lock_out;
			}

			qdisc_bstats_cpu_update(q, skb);
			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
			    !nolock_qdisc_is_empty(q))
				__qdisc_run(q);

			qdisc_run_end(q);
			return NET_XMIT_SUCCESS;
		}

		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
		qdisc_run(q);

no_lock_out:
		if (unlikely(to_free))
			kfree_skb_list_reason(to_free,
					      tcf_get_drop_reason(to_free));
		return rc;
	}

	if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
		kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
		return NET_XMIT_DROP;
	}
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
	 * and then other tasks will only enqueue packets. The packets will be
	 * sent after the qdisc owner is scheduled again. To prevent this
	 * scenario the task always serialize on the lock.
	 */
	contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		WRITE_ONCE(q->owner, smp_processor_id());
		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
		WRITE_ONCE(q->owner, -1);
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list_reason(to_free,
				      tcf_get_drop_reason(to_free));
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	const struct netprio_map *map;
	const struct sock *sk;
	unsigned int prioidx;

	if (skb->priority)
		return;
	map = rcu_dereference_bh(skb->dev->priomap);
	if (!map)
		return;
	sk = skb_to_full_sk(skb);
	if (!sk)
		return;

	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);

	if (prioidx < map->priomap_len)
		skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	if (skb->ip_summed == CHECKSUM_NONE)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_NET_EGRESS
static struct netdev_queue *
netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
{
	int qm = skb_get_queue_mapping(skb);

	return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
}

#ifndef CONFIG_PREEMPT_RT
static bool netdev_xmit_txqueue_skipped(void)
{
	return __this_cpu_read(softnet_data.xmit.skip_txqueue);
}

void netdev_xmit_skip_txqueue(bool skip)
{
	__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);

#else
static bool netdev_xmit_txqueue_skipped(void)
{
	return current->net_xmit.skip_txqueue;
}

void netdev_xmit_skip_txqueue(bool skip)
{
	current->net_xmit.skip_txqueue = skip;
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#endif
#endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_NET_XGRESS
static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
		  enum skb_drop_reason *drop_reason)
{
	int ret = TC_ACT_UNSPEC;
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
	struct tcf_result res;

	if (!miniq)
		return ret;

	/* Global bypass */
	if (!static_branch_likely(&tcf_sw_enabled_key))
		return ret;

	/* Block-wise bypass */
	if (tcf_block_bypass_sw(miniq->block))
		return ret;

	tc_skb_cb(skb)->mru = 0;
	tc_skb_cb(skb)->post_ct = false;
	tcf_set_drop_reason(skb, *drop_reason);

	mini_qdisc_bstats_cpu_update(miniq, skb);
	ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
	/* Only tcf related quirks below. */
	switch (ret) {
	case TC_ACT_SHOT:
		*drop_reason = tcf_get_drop_reason(skb);
		mini_qdisc_qstats_cpu_drop(miniq);
		break;
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(res.classid);
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return ret;
}

static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);

void tcx_inc(void)
{
	static_branch_inc(&tcx_needed_key);
}

void tcx_dec(void)
{
	static_branch_dec(&tcx_needed_key);
}
static __always_inline enum tcx_action_base
tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
	const bool needs_mac)
{
	const struct bpf_mprog_fp *fp;
	const struct bpf_prog *prog;
	int ret = TCX_NEXT;

	if (needs_mac)
		__skb_push(skb, skb->mac_len);
	bpf_mprog_foreach_prog(entry, fp, prog) {
		bpf_compute_data_pointers(skb);
		ret = bpf_prog_run(prog, skb);
		if (ret != TCX_NEXT)
			break;
	}
	if (needs_mac)
		__skb_pull(skb, skb->mac_len);
	return tcx_action_code(skb, ret);
}
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev, bool *another)
{
	struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	int sch_ret;

	if (!entry)
		return skb;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	tcx_set_ingress(skb, true);

	if (static_branch_unlikely(&tcx_needed_key)) {
		sch_ret = tcx_run(entry, skb, true);
		if (sch_ret != TC_ACT_UNSPEC)
			goto ingress_verdict;
	}
	sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
ingress_verdict:
	switch (sch_ret) {
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by BPF, so we can safely
		 * push the L2 header back before redirecting to another
		 * netdev.
		 */
		__skb_push(skb, skb->mac_len);
		if (skb_do_redirect(skb) == -EAGAIN) {
			__skb_pull(skb, skb->mac_len);
			*another = true;
			break;
		}
		*ret = NET_RX_SUCCESS;
		bpf_net_ctx_clear(bpf_net_ctx);
		return NULL;
	case TC_ACT_SHOT:
		kfree_skb_reason(skb, drop_reason);
		*ret = NET_RX_DROP;
		bpf_net_ctx_clear(bpf_net_ctx);
		return NULL;
	/* used by tc_run */
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		fallthrough;
	case TC_ACT_CONSUMED:
		*ret = NET_RX_SUCCESS;
		bpf_net_ctx_clear(bpf_net_ctx);
		return NULL;
	}
	bpf_net_ctx_clear(bpf_net_ctx);

	return skb;
}
static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	int sch_ret;

	if (!entry)
		return skb;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

	/* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
	 * already set by the caller.
	 */
	if (static_branch_unlikely(&tcx_needed_key)) {
		sch_ret = tcx_run(entry, skb, false);
		if (sch_ret != TC_ACT_UNSPEC)
			goto egress_verdict;
	}
	sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
egress_verdict:
	switch (sch_ret) {
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		bpf_net_ctx_clear(bpf_net_ctx);
		return NULL;
	case TC_ACT_SHOT:
		kfree_skb_reason(skb, drop_reason);
		*ret = NET_XMIT_DROP;
		bpf_net_ctx_clear(bpf_net_ctx);
		return NULL;
	/* used by tc_run */
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		fallthrough;
	case TC_ACT_CONSUMED:
		*ret = NET_XMIT_SUCCESS;
		bpf_net_ctx_clear(bpf_net_ctx);
		return NULL;
	}
	bpf_net_ctx_clear(bpf_net_ctx);

	return skb;
}
#else
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev, bool *another)
{
	return skb;
}

static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	return skb;
}
#endif /* CONFIG_NET_XGRESS */
#ifdef CONFIG_XPS
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
			       struct xps_dev_maps *dev_maps, unsigned int tci)
{
	int tc = netdev_get_prio_tc_map(dev, skb->priority);
	struct xps_map *map;
	int queue_index = -1;

	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
		return queue_index;

	tci *= dev_maps->num_tc;
	tci += tc;

	map = rcu_dereference(dev_maps->attr_map[tci]);
	if (map) {
		if (map->len == 1)
			queue_index = map->queues[0];
		else
			queue_index = map->queues[reciprocal_scale(
						skb_get_hash(skb), map->len)];
		if (unlikely(queue_index >= dev->real_num_tx_queues))
			queue_index = -1;
	}
	return queue_index;
}
#endif
*dev
, struct net_device
*sb_dev
,
4368 struct sk_buff
*skb
)
4371 struct xps_dev_maps
*dev_maps
;
4372 struct sock
*sk
= skb
->sk
;
4373 int queue_index
= -1;
4375 if (!static_key_false(&xps_needed
))
4379 if (!static_key_false(&xps_rxqs_needed
))
4382 dev_maps
= rcu_dereference(sb_dev
->xps_maps
[XPS_RXQS
]);
4384 int tci
= sk_rx_queue_get(sk
);
4387 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
4392 if (queue_index
< 0) {
4393 dev_maps
= rcu_dereference(sb_dev
->xps_maps
[XPS_CPUS
]);
4395 unsigned int tci
= skb
->sender_cpu
- 1;
4397 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
4409 u16
dev_pick_tx_zero(struct net_device
*dev
, struct sk_buff
*skb
,
4410 struct net_device
*sb_dev
)
4414 EXPORT_SYMBOL(dev_pick_tx_zero
);
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		   struct net_device *sb_dev)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	sb_dev = sb_dev ? : dev;

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, sb_dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, sb_dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(netdev_pick_tx);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
		else
			queue_index = netdev_pick_tx(dev, skb, sb_dev);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
4472 * __dev_queue_xmit() - transmit a buffer
4473 * @skb: buffer to transmit
4474 * @sb_dev: suboordinate device used for L2 forwarding offload
4476 * Queue a buffer for transmission to a network device. The caller must
4477 * have set the device and priority and built the buffer before calling
4478 * this function. The function can be called from an interrupt.
4480 * When calling this method, interrupts MUST be enabled. This is because
4481 * the BH enable code must have IRQs enabled so that it will not deadlock.
4483 * Regardless of the return value, the skb is consumed, so it is currently
4484 * difficult to retry a send to this method. (You can bump the ref count
4485 * before sending to hold a reference for retry if you are careful.)
4488 * * 0 - buffer successfully transmitted
4489 * * positive qdisc return code - NET_XMIT_DROP etc.
4490 * * negative errno - other errors
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq = NULL;
	struct Qdisc *q;
	int rc = -ENOMEM;
	bool again = false;

	skb_reset_mac_header(skb);
	skb_assert_len(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
	tcx_set_ingress(skb, false);
#ifdef CONFIG_NET_EGRESS
	if (static_branch_unlikely(&egress_needed_key)) {
		if (nf_hook_egress_active()) {
			skb = nf_hook_egress(skb, &rc, dev);
			if (!skb)
				goto out;
		}

		netdev_xmit_skip_txqueue(false);

		nf_skip_egress(skb, true);
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
		nf_skip_egress(skb, false);

		if (netdev_xmit_txqueue_skipped())
			txq = netdev_tx_queue_mapping(dev, skb);
	}
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	if (!txq)
		txq = netdev_core_pick_tx(dev, skb, sb_dev);

	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shoot the lock. It is not prone to deadlocks.
	 * Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		/* Other cpus might concurrently change txq->xmit_lock_owner
		 * to -1 or to their cpu id, but not to our id.
		 */
		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
			if (dev_xmit_recursion())
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				dev_xmit_recursion_inc();
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				dev_xmit_recursion_dec();
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	dev_core_stats_tx_dropped_inc(dev);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(__dev_queue_xmit);
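/*
 * Editor's illustrative sketch, not part of the upstream file: a minimal
 * caller of dev_queue_xmit() following the rules documented above - the skb
 * must be fully built, skb->dev and skb->priority set, and the call made
 * with interrupts enabled.  example_xmit_frame() and its parameters are
 * hypothetical.
 */
static int __maybe_unused example_xmit_frame(struct net_device *dev,
					     const void *frame,
					     unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, frame, len);	/* frame already carries the L2 header */
	skb->dev = dev;
	skb->priority = 0;

	/* The skb is consumed regardless of the return value. */
	return dev_queue_xmit(skb);
}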
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	skb_set_queue_mapping(skb, queue_id);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	dev_xmit_recursion_inc();
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);
	dev_xmit_recursion_dec();

	local_bh_enable();
	return ret;
drop:
	dev_core_stats_tx_dropped_inc(dev);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}
EXPORT_SYMBOL(__dev_direct_xmit);
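/*
 * Editor's illustrative sketch, not part of the upstream file: callers such
 * as AF_XDP use this path to push an already built skb at one specific TX
 * queue, bypassing the qdisc layer.  The dev_direct_xmit() wrapper from
 * <linux/netdevice.h> frees the skb when the transmit did not complete.
 * example_direct_xmit() and its arguments are hypothetical.
 */
static int __maybe_unused example_direct_xmit(struct net_device *dev,
					      struct sk_buff *skb, u16 queue_id)
{
	if (queue_id >= dev->real_num_tx_queues)
		return -EINVAL;

	skb->dev = dev;
	return dev_direct_xmit(skb, queue_id);
}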
/*************************************************************************
 *************************************************************************/

static DEFINE_PER_CPU(struct task_struct *, backlog_napi);

int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	struct task_struct *thread;

	lockdep_assert_irqs_disabled();

	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
		/* Paired with smp_mb__before_atomic() in
		 * napi_enable()/dev_set_threaded().
		 * Use READ_ONCE() to guarantee a complete
		 * read on napi->thread. Only call
		 * wake_up_process() when it's not NULL.
		 */
		thread = READ_ONCE(napi->thread);
		if (thread) {
			if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
				goto use_local_napi;

			set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
			wake_up_process(thread);
			return;
		}
	}

use_local_napi:
	list_add_tail(&napi->poll_list, &sd->poll_list);
	WRITE_ONCE(napi->list_owner, smp_processor_id());
	/* If not called from net_rx_action()
	 * we have to raise NET_RX_SOFTIRQ.
	 */
	if (!sd->in_net_rx_action)
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
4705 static struct rps_dev_flow
*
4706 set_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
4707 struct rps_dev_flow
*rflow
, u16 next_cpu
)
4709 if (next_cpu
< nr_cpu_ids
) {
4711 #ifdef CONFIG_RFS_ACCEL
4712 struct netdev_rx_queue
*rxqueue
;
4713 struct rps_dev_flow_table
*flow_table
;
4714 struct rps_dev_flow
*old_rflow
;
4719 /* Should we steer this flow to a different hardware queue? */
4720 if (!skb_rx_queue_recorded(skb
) || !dev
->rx_cpu_rmap
||
4721 !(dev
->features
& NETIF_F_NTUPLE
))
4723 rxq_index
= cpu_rmap_lookup_index(dev
->rx_cpu_rmap
, next_cpu
);
4724 if (rxq_index
== skb_get_rx_queue(skb
))
4727 rxqueue
= dev
->_rx
+ rxq_index
;
4728 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
4731 flow_id
= skb_get_hash(skb
) & flow_table
->mask
;
4732 rc
= dev
->netdev_ops
->ndo_rx_flow_steer(dev
, skb
,
4733 rxq_index
, flow_id
);
4737 rflow
= &flow_table
->flows
[flow_id
];
4738 WRITE_ONCE(rflow
->filter
, rc
);
4739 if (old_rflow
->filter
== rc
)
4740 WRITE_ONCE(old_rflow
->filter
, RPS_NO_FILTER
);
4743 head
= READ_ONCE(per_cpu(softnet_data
, next_cpu
).input_queue_head
);
4744 rps_input_queue_tail_save(&rflow
->last_qtail
, head
);
4747 WRITE_ONCE(rflow
->cpu
, next_cpu
);
4752 * get_rps_cpu is called from netif_receive_skb and returns the target
4753 * CPU from the RPS map of the receiving queue for a given skb.
4754 * rcu_read_lock must be held on entry.
4756 static int get_rps_cpu(struct net_device
*dev
, struct sk_buff
*skb
,
4757 struct rps_dev_flow
**rflowp
)
4759 const struct rps_sock_flow_table
*sock_flow_table
;
4760 struct netdev_rx_queue
*rxqueue
= dev
->_rx
;
4761 struct rps_dev_flow_table
*flow_table
;
4762 struct rps_map
*map
;
4767 if (skb_rx_queue_recorded(skb
)) {
4768 u16 index
= skb_get_rx_queue(skb
);
4770 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
4771 WARN_ONCE(dev
->real_num_rx_queues
> 1,
4772 "%s received packet on queue %u, but number "
4773 "of RX queues is %u\n",
4774 dev
->name
, index
, dev
->real_num_rx_queues
);
4780 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4782 flow_table
= rcu_dereference(rxqueue
->rps_flow_table
);
4783 map
= rcu_dereference(rxqueue
->rps_map
);
4784 if (!flow_table
&& !map
)
4787 skb_reset_network_header(skb
);
4788 hash
= skb_get_hash(skb
);
4792 sock_flow_table
= rcu_dereference(net_hotdata
.rps_sock_flow_table
);
4793 if (flow_table
&& sock_flow_table
) {
4794 struct rps_dev_flow
*rflow
;
4798 /* First check into global flow table if there is a match.
4799 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
4801 ident
= READ_ONCE(sock_flow_table
->ents
[hash
& sock_flow_table
->mask
]);
4802 if ((ident
^ hash
) & ~net_hotdata
.rps_cpu_mask
)
4805 next_cpu
= ident
& net_hotdata
.rps_cpu_mask
;
4807 /* OK, now we know there is a match,
4808 * we can look at the local (per receive queue) flow table
4810 rflow
= &flow_table
->flows
[hash
& flow_table
->mask
];
4814 * If the desired CPU (where last recvmsg was done) is
4815 * different from current CPU (one in the rx-queue flow
4816 * table entry), switch if one of the following holds:
4817 * - Current CPU is unset (>= nr_cpu_ids).
4818 * - Current CPU is offline.
4819 * - The current CPU's queue tail has advanced beyond the
4820 * last packet that was enqueued using this table entry.
4821 * This guarantees that all previous packets for the flow
4822 * have been dequeued, thus preserving in order delivery.
4824 if (unlikely(tcpu
!= next_cpu
) &&
4825 (tcpu
>= nr_cpu_ids
|| !cpu_online(tcpu
) ||
4826 ((int)(READ_ONCE(per_cpu(softnet_data
, tcpu
).input_queue_head
) -
4827 rflow
->last_qtail
)) >= 0)) {
4829 rflow
= set_rps_cpu(dev
, skb
, rflow
, next_cpu
);
4832 if (tcpu
< nr_cpu_ids
&& cpu_online(tcpu
)) {
4842 tcpu
= map
->cpus
[reciprocal_scale(hash
, map
->len
)];
4843 if (cpu_online(tcpu
)) {
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
		    ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
			   READ_ONCE(rflow->last_qtail)) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
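#ifdef CONFIG_RFS_ACCEL
/*
 * Editor's illustrative sketch, not part of the upstream file: the kind of
 * periodic scan a driver implementing ndo_rx_flow_steer() is expected to
 * run, as described in the kernel-doc above.  struct example_filter, the
 * filter array and the commented-out example_remove_filter() hook are all
 * hypothetical driver state; only rps_may_expire_flow() is the real API
 * being demonstrated.
 */
struct example_filter {
	bool	in_use;
	u32	flow_id;	/* flow_id passed to ndo_rx_flow_steer() */
	u16	filter_id;	/* value returned by ndo_rx_flow_steer() */
};

static void __maybe_unused
example_expire_filters(struct net_device *dev, u16 rxq_index,
		       struct example_filter *filters, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!filters[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, rxq_index, filters[i].flow_id,
					filters[i].filter_id)) {
			/* example_remove_filter(dev, rxq_index, &filters[i]); */
			filters[i].in_use = false;
		}
	}
}
#endif /* CONFIG_RFS_ACCEL */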
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/* Called from hardirq (IPI) context */
static void trigger_rx_softirq(void *data)
{
	struct softnet_data *sd = data;

	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	smp_store_release(&sd->defer_ipi_scheduled, 0);
}
/*
 * After we queued a packet into sd->input_pkt_queue,
 * we need to make sure this queue is serviced soon.
 *
 * - If this is another cpu queue, link it to our rps_ipi_list,
 *   and make sure we will process rps_ipi_list from net_rx_action().
 *
 * - If this is our own queue, NAPI schedule our backlog.
 *   Note that this also raises NET_RX_SOFTIRQ.
 */
static void napi_schedule_rps(struct softnet_data *sd)
{
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

#ifdef CONFIG_RPS
	if (sd != mysd) {
		if (use_backlog_threads()) {
			__napi_schedule_irqoff(&sd->backlog);
			return;
		}

		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		/* If not called from net_rx_action() or napi_threaded_poll()
		 * we have to raise NET_RX_SOFTIRQ.
		 */
		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return;
	}
#endif /* CONFIG_RPS */
	__napi_schedule_irqoff(&mysd->backlog);
}
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
{
	unsigned long flags;

	if (use_backlog_threads()) {
		backlog_lock_irq_save(sd, &flags);

		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
			__napi_schedule_irqoff(&sd->backlog);

		backlog_unlock_irq_restore(sd, &flags);

	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
		smp_call_function_single_async(cpu, &sd->defer_csd);
	}
}
4965 #ifdef CONFIG_NET_FLOW_LIMIT
4966 int netdev_flow_limit_table_len __read_mostly
= (1 << 12);
4969 static bool skb_flow_limit(struct sk_buff
*skb
, unsigned int qlen
)
4971 #ifdef CONFIG_NET_FLOW_LIMIT
4972 struct sd_flow_limit
*fl
;
4973 struct softnet_data
*sd
;
4974 unsigned int old_flow
, new_flow
;
4976 if (qlen
< (READ_ONCE(net_hotdata
.max_backlog
) >> 1))
4979 sd
= this_cpu_ptr(&softnet_data
);
4982 fl
= rcu_dereference(sd
->flow_limit
);
4984 new_flow
= skb_get_hash(skb
) & (fl
->num_buckets
- 1);
4985 old_flow
= fl
->history
[fl
->history_head
];
4986 fl
->history
[fl
->history_head
] = new_flow
;
4989 fl
->history_head
&= FLOW_LIMIT_HISTORY
- 1;
4991 if (likely(fl
->buckets
[old_flow
]))
4992 fl
->buckets
[old_flow
]--;
4994 if (++fl
->buckets
[new_flow
] > (FLOW_LIMIT_HISTORY
>> 1)) {
5006 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
5007 * queue (may be a remote CPU queue).
5009 static int enqueue_to_backlog(struct sk_buff
*skb
, int cpu
,
5010 unsigned int *qtail
)
5012 enum skb_drop_reason reason
;
5013 struct softnet_data
*sd
;
5014 unsigned long flags
;
5019 reason
= SKB_DROP_REASON_DEV_READY
;
5020 if (!netif_running(skb
->dev
))
5023 reason
= SKB_DROP_REASON_CPU_BACKLOG
;
5024 sd
= &per_cpu(softnet_data
, cpu
);
5026 qlen
= skb_queue_len_lockless(&sd
->input_pkt_queue
);
5027 max_backlog
= READ_ONCE(net_hotdata
.max_backlog
);
5028 if (unlikely(qlen
> max_backlog
))
5029 goto cpu_backlog_drop
;
5030 backlog_lock_irq_save(sd
, &flags
);
5031 qlen
= skb_queue_len(&sd
->input_pkt_queue
);
5032 if (qlen
<= max_backlog
&& !skb_flow_limit(skb
, qlen
)) {
5034 /* Schedule NAPI for backlog device. We can use
5035 * non atomic operation as we own the queue lock.
5037 if (!__test_and_set_bit(NAPI_STATE_SCHED
,
5038 &sd
->backlog
.state
))
5039 napi_schedule_rps(sd
);
5041 __skb_queue_tail(&sd
->input_pkt_queue
, skb
);
5042 tail
= rps_input_queue_tail_incr(sd
);
5043 backlog_unlock_irq_restore(sd
, &flags
);
5045 /* save the tail outside of the critical section */
5046 rps_input_queue_tail_save(qtail
, tail
);
5047 return NET_RX_SUCCESS
;
5050 backlog_unlock_irq_restore(sd
, &flags
);
5053 atomic_inc(&sd
->dropped
);
5055 dev_core_stats_rx_dropped_inc(skb
->dev
);
5056 kfree_skb_reason(skb
, reason
);
5060 static struct netdev_rx_queue
*netif_get_rxqueue(struct sk_buff
*skb
)
5062 struct net_device
*dev
= skb
->dev
;
5063 struct netdev_rx_queue
*rxqueue
;
5067 if (skb_rx_queue_recorded(skb
)) {
5068 u16 index
= skb_get_rx_queue(skb
);
5070 if (unlikely(index
>= dev
->real_num_rx_queues
)) {
5071 WARN_ONCE(dev
->real_num_rx_queues
> 1,
5072 "%s received packet on queue %u, but number "
5073 "of RX queues is %u\n",
5074 dev
->name
, index
, dev
->real_num_rx_queues
);
5076 return rxqueue
; /* Return first rxqueue */
5083 u32
bpf_prog_run_generic_xdp(struct sk_buff
*skb
, struct xdp_buff
*xdp
,
5084 const struct bpf_prog
*xdp_prog
)
5086 void *orig_data
, *orig_data_end
, *hard_start
;
5087 struct netdev_rx_queue
*rxqueue
;
5088 bool orig_bcast
, orig_host
;
5089 u32 mac_len
, frame_sz
;
5090 __be16 orig_eth_type
;
5095 /* The XDP program wants to see the packet starting at the MAC
5098 mac_len
= skb
->data
- skb_mac_header(skb
);
5099 hard_start
= skb
->data
- skb_headroom(skb
);
5101 /* SKB "head" area always have tailroom for skb_shared_info */
5102 frame_sz
= (void *)skb_end_pointer(skb
) - hard_start
;
5103 frame_sz
+= SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
5105 rxqueue
= netif_get_rxqueue(skb
);
5106 xdp_init_buff(xdp
, frame_sz
, &rxqueue
->xdp_rxq
);
5107 xdp_prepare_buff(xdp
, hard_start
, skb_headroom(skb
) - mac_len
,
5108 skb_headlen(skb
) + mac_len
, true);
5109 if (skb_is_nonlinear(skb
)) {
5110 skb_shinfo(skb
)->xdp_frags_size
= skb
->data_len
;
5111 xdp_buff_set_frags_flag(xdp
);
5113 xdp_buff_clear_frags_flag(xdp
);
5116 orig_data_end
= xdp
->data_end
;
5117 orig_data
= xdp
->data
;
5118 eth
= (struct ethhdr
*)xdp
->data
;
5119 orig_host
= ether_addr_equal_64bits(eth
->h_dest
, skb
->dev
->dev_addr
);
5120 orig_bcast
= is_multicast_ether_addr_64bits(eth
->h_dest
);
5121 orig_eth_type
= eth
->h_proto
;
5123 act
= bpf_prog_run_xdp(xdp_prog
, xdp
);
5125 /* check if bpf_xdp_adjust_head was used */
5126 off
= xdp
->data
- orig_data
;
5129 __skb_pull(skb
, off
);
5131 __skb_push(skb
, -off
);
5133 skb
->mac_header
+= off
;
5134 skb_reset_network_header(skb
);
5137 /* check if bpf_xdp_adjust_tail was used */
5138 off
= xdp
->data_end
- orig_data_end
;
5140 skb_set_tail_pointer(skb
, xdp
->data_end
- xdp
->data
);
5141 skb
->len
+= off
; /* positive on grow, negative on shrink */
5144 /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
5145 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
5147 if (xdp_buff_has_frags(xdp
))
5148 skb
->data_len
= skb_shinfo(skb
)->xdp_frags_size
;
5152 /* check if XDP changed eth hdr such SKB needs update */
5153 eth
= (struct ethhdr
*)xdp
->data
;
5154 if ((orig_eth_type
!= eth
->h_proto
) ||
5155 (orig_host
!= ether_addr_equal_64bits(eth
->h_dest
,
5156 skb
->dev
->dev_addr
)) ||
5157 (orig_bcast
!= is_multicast_ether_addr_64bits(eth
->h_dest
))) {
5158 __skb_push(skb
, ETH_HLEN
);
5159 skb
->pkt_type
= PACKET_HOST
;
5160 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
5163 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
5164 * before calling us again on redirect path. We do not call do_redirect
5165 * as we leave that up to the caller.
5167 * Caller is responsible for managing lifetime of skb (i.e. calling
5168 * kfree_skb in response to actions it cannot handle/XDP_DROP).
5173 __skb_push(skb
, mac_len
);
5176 metalen
= xdp
->data
- xdp
->data_meta
;
5178 skb_metadata_set(skb
, metalen
);
5186 netif_skb_check_for_xdp(struct sk_buff
**pskb
, const struct bpf_prog
*prog
)
5188 struct sk_buff
*skb
= *pskb
;
5189 int err
, hroom
, troom
;
5191 if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool
), pskb
, prog
))
5194 /* In case we have to go down the path and also linearize,
5195 * then lets do the pskb_expand_head() work just once here.
5197 hroom
= XDP_PACKET_HEADROOM
- skb_headroom(skb
);
5198 troom
= skb
->tail
+ skb
->data_len
- skb
->end
;
5199 err
= pskb_expand_head(skb
,
5200 hroom
> 0 ? ALIGN(hroom
, NET_SKB_PAD
) : 0,
5201 troom
> 0 ? troom
+ 128 : 0, GFP_ATOMIC
);
5205 return skb_linearize(skb
);
5208 static u32
netif_receive_generic_xdp(struct sk_buff
**pskb
,
5209 struct xdp_buff
*xdp
,
5210 const struct bpf_prog
*xdp_prog
)
5212 struct sk_buff
*skb
= *pskb
;
5213 u32 mac_len
, act
= XDP_DROP
;
5215 /* Reinjected packets coming from act_mirred or similar should
5216 * not get XDP generic processing.
5218 if (skb_is_redirected(skb
))
5221 /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
5222 * bytes. This is the guarantee that also native XDP provides,
5223 * thus we need to do it here as well.
5225 mac_len
= skb
->data
- skb_mac_header(skb
);
5226 __skb_push(skb
, mac_len
);
5228 if (skb_cloned(skb
) || skb_is_nonlinear(skb
) ||
5229 skb_headroom(skb
) < XDP_PACKET_HEADROOM
) {
5230 if (netif_skb_check_for_xdp(pskb
, xdp_prog
))
5234 __skb_pull(*pskb
, mac_len
);
5236 act
= bpf_prog_run_generic_xdp(*pskb
, xdp
, xdp_prog
);
5243 bpf_warn_invalid_xdp_action((*pskb
)->dev
, xdp_prog
, act
);
5246 trace_xdp_exception((*pskb
)->dev
, xdp_prog
, act
);
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior. This also means
 * that XDP packets are able to starve other packets going through a qdisc,
 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
 * queues, so they do not have this starvation issue.
 */
void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_core_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_frozen_or_drv_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		dev_core_stats_tx_dropped_inc(dev);
		kfree_skb(skb);
	}
}

static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
5288 int do_xdp_generic(const struct bpf_prog
*xdp_prog
, struct sk_buff
**pskb
)
5290 struct bpf_net_context __bpf_net_ctx
, *bpf_net_ctx
;
5293 struct xdp_buff xdp
;
5297 bpf_net_ctx
= bpf_net_ctx_set(&__bpf_net_ctx
);
5298 act
= netif_receive_generic_xdp(pskb
, &xdp
, xdp_prog
);
5299 if (act
!= XDP_PASS
) {
5302 err
= xdp_do_generic_redirect((*pskb
)->dev
, *pskb
,
5308 generic_xdp_tx(*pskb
, xdp_prog
);
5311 bpf_net_ctx_clear(bpf_net_ctx
);
5314 bpf_net_ctx_clear(bpf_net_ctx
);
5318 bpf_net_ctx_clear(bpf_net_ctx
);
5319 kfree_skb_reason(*pskb
, SKB_DROP_REASON_XDP
);
5322 EXPORT_SYMBOL_GPL(do_xdp_generic
);
5324 static int netif_rx_internal(struct sk_buff
*skb
)
5328 net_timestamp_check(READ_ONCE(net_hotdata
.tstamp_prequeue
), skb
);
5330 trace_netif_rx(skb
);
5333 if (static_branch_unlikely(&rps_needed
)) {
5334 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
5339 cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
5341 cpu
= smp_processor_id();
5343 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
5351 ret
= enqueue_to_backlog(skb
, smp_processor_id(), &qtail
);
/**
 *	__netif_rx	-	Slightly optimized version of netif_rx
 *	@skb: buffer to post
 *
 *	This behaves as netif_rx except that it does not disable bottom halves.
 *	As a result this function may only be invoked from the interrupt context
 *	(either hard or soft interrupt).
 */
int __netif_rx(struct sk_buff *skb)
{
	int ret;

	lockdep_assert_once(hardirq_count() | softirq_count());

	trace_netif_rx_entry(skb);
	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);

	return ret;
}
EXPORT_SYMBOL(__netif_rx);
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process via the backlog NAPI device. It
 *	always succeeds. The buffer may be dropped during processing for
 *	congestion control or by the protocol layers.
 *	The network buffer is passed via the backlog NAPI device. Modern NIC
 *	drivers should use NAPI and GRO.
 *	This function can be used from interrupt and from process context. The
 *	caller from process context must not disable interrupts before invoking
 *	this function.
 *
 *	Return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	bool need_bh_off = !(hardirq_count() | softirq_count());
	int ret;

	if (need_bh_off)
		local_bh_disable();
	trace_netif_rx_entry(skb);
	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);
	if (need_bh_off)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(netif_rx);
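/*
 * Editor's illustrative sketch, not part of the upstream file: the classic
 * non-NAPI receive path a simple driver would use - wrap the received frame
 * in an skb, let eth_type_trans() set protocol/pkt_type, then hand it to
 * netif_rx().  example_rx_frame() and its parameters are hypothetical.
 */
static void __maybe_unused example_rx_frame(struct net_device *dev,
					    const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	skb_put_data(skb, data, len);
	skb->protocol = eth_type_trans(skb, dev);

	/* Works from both interrupt and process context. */
	netif_rx(skb);
}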
5412 static __latent_entropy
void net_tx_action(void)
5414 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
5416 if (sd
->completion_queue
) {
5417 struct sk_buff
*clist
;
5419 local_irq_disable();
5420 clist
= sd
->completion_queue
;
5421 sd
->completion_queue
= NULL
;
5425 struct sk_buff
*skb
= clist
;
5427 clist
= clist
->next
;
5429 WARN_ON(refcount_read(&skb
->users
));
5430 if (likely(get_kfree_skb_cb(skb
)->reason
== SKB_CONSUMED
))
5431 trace_consume_skb(skb
, net_tx_action
);
5433 trace_kfree_skb(skb
, net_tx_action
,
5434 get_kfree_skb_cb(skb
)->reason
, NULL
);
5436 if (skb
->fclone
!= SKB_FCLONE_UNAVAILABLE
)
5439 __napi_kfree_skb(skb
,
5440 get_kfree_skb_cb(skb
)->reason
);
5444 if (sd
->output_queue
) {
5447 local_irq_disable();
5448 head
= sd
->output_queue
;
5449 sd
->output_queue
= NULL
;
5450 sd
->output_queue_tailp
= &sd
->output_queue
;
5456 struct Qdisc
*q
= head
;
5457 spinlock_t
*root_lock
= NULL
;
5459 head
= head
->next_sched
;
5461 /* We need to make sure head->next_sched is read
5462 * before clearing __QDISC_STATE_SCHED
5464 smp_mb__before_atomic();
5466 if (!(q
->flags
& TCQ_F_NOLOCK
)) {
5467 root_lock
= qdisc_lock(q
);
5468 spin_lock(root_lock
);
5469 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
,
5471 /* There is a synchronize_net() between
5472 * STATE_DEACTIVATED flag being set and
5473 * qdisc_reset()/some_qdisc_is_busy() in
5474 * dev_deactivate(), so we can safely bail out
5475 * early here to avoid data race between
5476 * qdisc_deactivate() and some_qdisc_is_busy()
5477 * for lockless qdisc.
5479 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
5483 clear_bit(__QDISC_STATE_SCHED
, &q
->state
);
5486 spin_unlock(root_lock
);
5492 xfrm_dev_backlog(sd
);
5495 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5496 /* This hook is defined here for ATM LANE */
5497 int (*br_fdb_test_addr_hook
)(struct net_device
*dev
,
5498 unsigned char *addr
) __read_mostly
;
5499 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook
);
/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
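/*
 * Editor's illustrative sketch, not part of the upstream file: how a stacked
 * device (bridge/bond/team style) hooks itself into the receive path with
 * netdev_rx_handler_register().  example_handle_frame(), example_port_attach()
 * and the opaque port pointer are hypothetical; the registration call and the
 * rx_handler_result_t contract are the real API being demonstrated.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port)
		return RX_HANDLER_PASS;

	/* A real handler would consume or redirect the skb here. */
	return RX_HANDLER_PASS;
}

static int __maybe_unused example_port_attach(struct net_device *port_dev,
					      void *port)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(port_dev, example_handle_frame, port);
}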
/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
5590 static inline int nf_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
,
5591 int *ret
, struct net_device
*orig_dev
)
5593 if (nf_hook_ingress_active(skb
)) {
5597 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
5602 ingress_retval
= nf_hook_ingress(skb
);
5604 return ingress_retval
;
5609 static int __netif_receive_skb_core(struct sk_buff
**pskb
, bool pfmemalloc
,
5610 struct packet_type
**ppt_prev
)
5612 struct packet_type
*ptype
, *pt_prev
;
5613 rx_handler_func_t
*rx_handler
;
5614 struct sk_buff
*skb
= *pskb
;
5615 struct net_device
*orig_dev
;
5616 bool deliver_exact
= false;
5617 int ret
= NET_RX_DROP
;
5620 net_timestamp_check(!READ_ONCE(net_hotdata
.tstamp_prequeue
), skb
);
5622 trace_netif_receive_skb(skb
);
5624 orig_dev
= skb
->dev
;
5626 skb_reset_network_header(skb
);
5627 #if !defined(CONFIG_DEBUG_NET)
5628 /* We plan to no longer reset the transport header here.
5629 * Give some time to fuzzers and dev build to catch bugs
5630 * in network stacks.
5632 if (!skb_transport_header_was_set(skb
))
5633 skb_reset_transport_header(skb
);
5635 skb_reset_mac_len(skb
);
5640 skb
->skb_iif
= skb
->dev
->ifindex
;
5642 __this_cpu_inc(softnet_data
.processed
);
5644 if (static_branch_unlikely(&generic_xdp_needed_key
)) {
5648 ret2
= do_xdp_generic(rcu_dereference(skb
->dev
->xdp_prog
),
5652 if (ret2
!= XDP_PASS
) {
5658 if (eth_type_vlan(skb
->protocol
)) {
5659 skb
= skb_vlan_untag(skb
);
5664 if (skb_skip_tc_classify(skb
))
5670 list_for_each_entry_rcu(ptype
, &net_hotdata
.ptype_all
, list
) {
5672 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5676 list_for_each_entry_rcu(ptype
, &skb
->dev
->ptype_all
, list
) {
5678 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5683 #ifdef CONFIG_NET_INGRESS
5684 if (static_branch_unlikely(&ingress_needed_key
)) {
5685 bool another
= false;
5687 nf_skip_egress(skb
, true);
5688 skb
= sch_handle_ingress(skb
, &pt_prev
, &ret
, orig_dev
,
5695 nf_skip_egress(skb
, false);
5696 if (nf_ingress(skb
, &pt_prev
, &ret
, orig_dev
) < 0)
5700 skb_reset_redirect(skb
);
5702 if (pfmemalloc
&& !skb_pfmemalloc_protocol(skb
))
5705 if (skb_vlan_tag_present(skb
)) {
5707 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5710 if (vlan_do_receive(&skb
))
5712 else if (unlikely(!skb
))
5716 rx_handler
= rcu_dereference(skb
->dev
->rx_handler
);
5719 ret
= deliver_skb(skb
, pt_prev
, orig_dev
);
5722 switch (rx_handler(&skb
)) {
5723 case RX_HANDLER_CONSUMED
:
5724 ret
= NET_RX_SUCCESS
;
5726 case RX_HANDLER_ANOTHER
:
5728 case RX_HANDLER_EXACT
:
5729 deliver_exact
= true;
5731 case RX_HANDLER_PASS
:
5738 if (unlikely(skb_vlan_tag_present(skb
)) && !netdev_uses_dsa(skb
->dev
)) {
5740 if (skb_vlan_tag_get_id(skb
)) {
5741 /* Vlan id is non 0 and vlan_do_receive() above couldn't
5744 skb
->pkt_type
= PACKET_OTHERHOST
;
5745 } else if (eth_type_vlan(skb
->protocol
)) {
5746 /* Outer header is 802.1P with vlan 0, inner header is
5747 * 802.1Q or 802.1AD and vlan_do_receive() above could
5748 * not find vlan dev for vlan id 0.
5750 __vlan_hwaccel_clear_tag(skb
);
5751 skb
= skb_vlan_untag(skb
);
5754 if (vlan_do_receive(&skb
))
5755 /* After stripping off 802.1P header with vlan 0
5756 * vlan dev is found for inner header.
5759 else if (unlikely(!skb
))
5762 /* We have stripped outer 802.1P vlan 0 header.
5763 * But could not find vlan dev.
5764 * check again for vlan id to set OTHERHOST.
5768 /* Note: we might in the future use prio bits
5769 * and set skb->priority like in vlan_do_receive()
5770 * For the time being, just ignore Priority Code Point
5772 __vlan_hwaccel_clear_tag(skb
);
5775 type
= skb
->protocol
;
5777 /* deliver only exact match when indicated */
5778 if (likely(!deliver_exact
)) {
5779 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
5780 &ptype_base
[ntohs(type
) &
5784 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
5785 &orig_dev
->ptype_specific
);
5787 if (unlikely(skb
->dev
!= orig_dev
)) {
5788 deliver_ptype_list_skb(skb
, &pt_prev
, orig_dev
, type
,
5789 &skb
->dev
->ptype_specific
);
5793 if (unlikely(skb_orphan_frags_rx(skb
, GFP_ATOMIC
)))
5795 *ppt_prev
= pt_prev
;
5799 dev_core_stats_rx_dropped_inc(skb
->dev
);
5801 dev_core_stats_rx_nohandler_inc(skb
->dev
);
5802 kfree_skb_reason(skb
, SKB_DROP_REASON_UNHANDLED_PROTO
);
5803 /* Jamal, now you will not able to escape explaining
5804 * me how you were going to use this. :-)
5810 /* The invariant here is that if *ppt_prev is not NULL
5811 * then skb should also be non-NULL.
5813 * Apparently *ppt_prev assignment above holds this invariant due to
5814 * skb dereferencing near it.
5820 static int __netif_receive_skb_one_core(struct sk_buff
*skb
, bool pfmemalloc
)
5822 struct net_device
*orig_dev
= skb
->dev
;
5823 struct packet_type
*pt_prev
= NULL
;
5826 ret
= __netif_receive_skb_core(&skb
, pfmemalloc
, &pt_prev
);
5828 ret
= INDIRECT_CALL_INET(pt_prev
->func
, ipv6_rcv
, ip_rcv
, skb
,
5829 skb
->dev
, pt_prev
, orig_dev
);
5834 * netif_receive_skb_core - special purpose version of netif_receive_skb
5835 * @skb: buffer to process
5837 * More direct receive version of netif_receive_skb(). It should
5838 * only be used by callers that have a need to skip RPS and Generic XDP.
5839 * Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5841 * This function may only be called from softirq context and interrupts
5842 * should be enabled.
5844 * Return values (usually ignored):
5845 * NET_RX_SUCCESS: no congestion
5846 * NET_RX_DROP: packet was dropped
5848 int netif_receive_skb_core(struct sk_buff
*skb
)
5853 ret
= __netif_receive_skb_one_core(skb
, false);
5858 EXPORT_SYMBOL(netif_receive_skb_core
);
5860 static inline void __netif_receive_skb_list_ptype(struct list_head
*head
,
5861 struct packet_type
*pt_prev
,
5862 struct net_device
*orig_dev
)
5864 struct sk_buff
*skb
, *next
;
5868 if (list_empty(head
))
5870 if (pt_prev
->list_func
!= NULL
)
5871 INDIRECT_CALL_INET(pt_prev
->list_func
, ipv6_list_rcv
,
5872 ip_list_rcv
, head
, pt_prev
, orig_dev
);
5874 list_for_each_entry_safe(skb
, next
, head
, list
) {
5875 skb_list_del_init(skb
);
5876 pt_prev
->func(skb
, skb
->dev
, pt_prev
, orig_dev
);
5880 static void __netif_receive_skb_list_core(struct list_head
*head
, bool pfmemalloc
)
5882 /* Fast-path assumptions:
5883 * - There is no RX handler.
5884 * - Only one packet_type matches.
5885 * If either of these fails, we will end up doing some per-packet
5886 * processing in-line, then handling the 'last ptype' for the whole
5887 * sublist. This can't cause out-of-order delivery to any single ptype,
5888 * because the 'last ptype' must be constant across the sublist, and all
5889 * other ptypes are handled per-packet.
5891 /* Current (common) ptype of sublist */
5892 struct packet_type
*pt_curr
= NULL
;
5893 /* Current (common) orig_dev of sublist */
5894 struct net_device
*od_curr
= NULL
;
5895 struct sk_buff
*skb
, *next
;
5898 list_for_each_entry_safe(skb
, next
, head
, list
) {
5899 struct net_device
*orig_dev
= skb
->dev
;
5900 struct packet_type
*pt_prev
= NULL
;
5902 skb_list_del_init(skb
);
5903 __netif_receive_skb_core(&skb
, pfmemalloc
, &pt_prev
);
5906 if (pt_curr
!= pt_prev
|| od_curr
!= orig_dev
) {
5907 /* dispatch old sublist */
5908 __netif_receive_skb_list_ptype(&sublist
, pt_curr
, od_curr
);
5909 /* start new sublist */
5910 INIT_LIST_HEAD(&sublist
);
5914 list_add_tail(&skb
->list
, &sublist
);
5917 /* dispatch final sublist */
5918 __netif_receive_skb_list_ptype(&sublist
, pt_curr
, od_curr
);
5921 static int __netif_receive_skb(struct sk_buff
*skb
)
5925 if (sk_memalloc_socks() && skb_pfmemalloc(skb
)) {
5926 unsigned int noreclaim_flag
;
5929 * PFMEMALLOC skbs are special, they should
5930 * - be delivered to SOCK_MEMALLOC sockets only
5931 * - stay away from userspace
5932 * - have bounded memory usage
5934 * Use PF_MEMALLOC as this saves us from propagating the allocation
5935 * context down to all allocation sites.
5937 noreclaim_flag
= memalloc_noreclaim_save();
5938 ret
= __netif_receive_skb_one_core(skb
, true);
5939 memalloc_noreclaim_restore(noreclaim_flag
);
5941 ret
= __netif_receive_skb_one_core(skb
, false);
5946 static void __netif_receive_skb_list(struct list_head
*head
)
5948 unsigned long noreclaim_flag
= 0;
5949 struct sk_buff
*skb
, *next
;
5950 bool pfmemalloc
= false; /* Is current sublist PF_MEMALLOC? */
5952 list_for_each_entry_safe(skb
, next
, head
, list
) {
5953 if ((sk_memalloc_socks() && skb_pfmemalloc(skb
)) != pfmemalloc
) {
5954 struct list_head sublist
;
5956 /* Handle the previous sublist */
5957 list_cut_before(&sublist
, head
, &skb
->list
);
5958 if (!list_empty(&sublist
))
5959 __netif_receive_skb_list_core(&sublist
, pfmemalloc
);
5960 pfmemalloc
= !pfmemalloc
;
5961 /* See comments in __netif_receive_skb */
5963 noreclaim_flag
= memalloc_noreclaim_save();
5965 memalloc_noreclaim_restore(noreclaim_flag
);
5968 /* Handle the remaining sublist */
5969 if (!list_empty(head
))
5970 __netif_receive_skb_list_core(head
, pfmemalloc
);
5971 /* Restore pflags */
5973 memalloc_noreclaim_restore(noreclaim_flag
);
5976 static int generic_xdp_install(struct net_device
*dev
, struct netdev_bpf
*xdp
)
5978 struct bpf_prog
*old
= rtnl_dereference(dev
->xdp_prog
);
5979 struct bpf_prog
*new = xdp
->prog
;
5982 switch (xdp
->command
) {
5983 case XDP_SETUP_PROG
:
5984 rcu_assign_pointer(dev
->xdp_prog
, new);
5989 static_branch_dec(&generic_xdp_needed_key
);
5990 } else if (new && !old
) {
5991 static_branch_inc(&generic_xdp_needed_key
);
5992 dev_disable_lro(dev
);
5993 dev_disable_gro_hw(dev
);
6005 static int netif_receive_skb_internal(struct sk_buff
*skb
)
6009 net_timestamp_check(READ_ONCE(net_hotdata
.tstamp_prequeue
), skb
);
6011 if (skb_defer_rx_timestamp(skb
))
6012 return NET_RX_SUCCESS
;
6016 if (static_branch_unlikely(&rps_needed
)) {
6017 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
6018 int cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
6021 ret
= enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
6027 ret
= __netif_receive_skb(skb
);
6032 void netif_receive_skb_list_internal(struct list_head
*head
)
6034 struct sk_buff
*skb
, *next
;
6037 list_for_each_entry_safe(skb
, next
, head
, list
) {
6038 net_timestamp_check(READ_ONCE(net_hotdata
.tstamp_prequeue
),
6040 skb_list_del_init(skb
);
6041 if (!skb_defer_rx_timestamp(skb
))
6042 list_add_tail(&skb
->list
, &sublist
);
6044 list_splice_init(&sublist
, head
);
6048 if (static_branch_unlikely(&rps_needed
)) {
6049 list_for_each_entry_safe(skb
, next
, head
, list
) {
6050 struct rps_dev_flow voidflow
, *rflow
= &voidflow
;
6051 int cpu
= get_rps_cpu(skb
->dev
, skb
, &rflow
);
6054 /* Will be handled, remove from list */
6055 skb_list_del_init(skb
);
6056 enqueue_to_backlog(skb
, cpu
, &rflow
->last_qtail
);
6061 __netif_receive_skb_list(head
);
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);

/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
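/*
 * Editor's illustrative sketch, not part of the upstream file: batching
 * received packets and delivering them with netif_receive_skb_list() from a
 * driver's NAPI poll callback.  example_rx_batch() is hypothetical and the
 * commented-out example_poll_rx_ring() stands in for the driver's descriptor
 * ring walk.
 */
static int __maybe_unused example_rx_batch(struct napi_struct *napi, int budget)
{
	struct list_head list;
	struct sk_buff *skb;
	int done = 0;

	INIT_LIST_HEAD(&list);

	while (done < budget) {
		/* Hypothetical driver hook pulling one frame off the ring. */
		skb = NULL; /* skb = example_poll_rx_ring(napi); */
		if (!skb)
			break;
		list_add_tail(&skb->list, &list);
		done++;
	}

	/* One pass through the stack for the whole batch; safe when empty. */
	netif_receive_skb_list(&list);
	return done;
}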
6118 /* Network device is going away, flush any packets still pending */
6119 static void flush_backlog(struct work_struct
*work
)
6121 struct sk_buff
*skb
, *tmp
;
6122 struct softnet_data
*sd
;
6125 sd
= this_cpu_ptr(&softnet_data
);
6127 backlog_lock_irq_disable(sd
);
6128 skb_queue_walk_safe(&sd
->input_pkt_queue
, skb
, tmp
) {
6129 if (skb
->dev
->reg_state
== NETREG_UNREGISTERING
) {
6130 __skb_unlink(skb
, &sd
->input_pkt_queue
);
6131 dev_kfree_skb_irq(skb
);
6132 rps_input_queue_head_incr(sd
);
6135 backlog_unlock_irq_enable(sd
);
6137 local_lock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6138 skb_queue_walk_safe(&sd
->process_queue
, skb
, tmp
) {
6139 if (skb
->dev
->reg_state
== NETREG_UNREGISTERING
) {
6140 __skb_unlink(skb
, &sd
->process_queue
);
6142 rps_input_queue_head_incr(sd
);
6145 local_unlock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6149 static bool flush_required(int cpu
)
6151 #if IS_ENABLED(CONFIG_RPS)
6152 struct softnet_data
*sd
= &per_cpu(softnet_data
, cpu
);
6155 backlog_lock_irq_disable(sd
);
6157 /* as insertion into process_queue happens with the rps lock held,
6158 * process_queue access may race only with dequeue
6160 do_flush
= !skb_queue_empty(&sd
->input_pkt_queue
) ||
6161 !skb_queue_empty_lockless(&sd
->process_queue
);
6162 backlog_unlock_irq_enable(sd
);
6166 /* without RPS we can't safely check input_pkt_queue: during a
6167 * concurrent remote skb_queue_splice() we can detect as empty both
6168 * input_pkt_queue and process_queue even if the latter could end-up
6169 * containing a lot of packets.
6174 struct flush_backlogs
{
6175 cpumask_t flush_cpus
;
6176 struct work_struct w
[];
6179 static struct flush_backlogs
*flush_backlogs_alloc(void)
6181 return kmalloc(struct_size_t(struct flush_backlogs
, w
, nr_cpu_ids
),
6185 static struct flush_backlogs
*flush_backlogs_fallback
;
6186 static DEFINE_MUTEX(flush_backlogs_mutex
);
6188 static void flush_all_backlogs(void)
6190 struct flush_backlogs
*ptr
= flush_backlogs_alloc();
6194 mutex_lock(&flush_backlogs_mutex
);
6195 ptr
= flush_backlogs_fallback
;
6197 cpumask_clear(&ptr
->flush_cpus
);
6201 for_each_online_cpu(cpu
) {
6202 if (flush_required(cpu
)) {
6203 INIT_WORK(&ptr
->w
[cpu
], flush_backlog
);
6204 queue_work_on(cpu
, system_highpri_wq
, &ptr
->w
[cpu
]);
6205 __cpumask_set_cpu(cpu
, &ptr
->flush_cpus
);
6209 /* we can have in flight packet[s] on the cpus we are not flushing,
6210 * synchronize_net() in unregister_netdevice_many() will take care of
6213 for_each_cpu(cpu
, &ptr
->flush_cpus
)
6214 flush_work(&ptr
->w
[cpu
]);
6218 if (ptr
!= flush_backlogs_fallback
)
6221 mutex_unlock(&flush_backlogs_mutex
);
6224 static void net_rps_send_ipi(struct softnet_data
*remsd
)
6228 struct softnet_data
*next
= remsd
->rps_ipi_next
;
6230 if (cpu_online(remsd
->cpu
))
6231 smp_call_function_single_async(remsd
->cpu
, &remsd
->csd
);
6238 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
6239 * Note: called with local irq disabled, but exits with local irq enabled.
6241 static void net_rps_action_and_irq_enable(struct softnet_data
*sd
)
6244 struct softnet_data
*remsd
= sd
->rps_ipi_list
;
6246 if (!use_backlog_threads() && remsd
) {
6247 sd
->rps_ipi_list
= NULL
;
6251 /* Send pending IPI's to kick RPS processing on remote cpus. */
6252 net_rps_send_ipi(remsd
);
6258 static bool sd_has_rps_ipi_waiting(struct softnet_data
*sd
)
6261 return !use_backlog_threads() && sd
->rps_ipi_list
;
6267 static int process_backlog(struct napi_struct
*napi
, int quota
)
6269 struct softnet_data
*sd
= container_of(napi
, struct softnet_data
, backlog
);
6273 /* Check if we have pending ipi, its better to send them now,
6274 * not waiting net_rx_action() end.
6276 if (sd_has_rps_ipi_waiting(sd
)) {
6277 local_irq_disable();
6278 net_rps_action_and_irq_enable(sd
);
6281 napi
->weight
= READ_ONCE(net_hotdata
.dev_rx_weight
);
6283 struct sk_buff
*skb
;
6285 local_lock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6286 while ((skb
= __skb_dequeue(&sd
->process_queue
))) {
6287 local_unlock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6289 __netif_receive_skb(skb
);
6291 if (++work
>= quota
) {
6292 rps_input_queue_head_add(sd
, work
);
6296 local_lock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6298 local_unlock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6300 backlog_lock_irq_disable(sd
);
6301 if (skb_queue_empty(&sd
->input_pkt_queue
)) {
6303 * Inline a custom version of __napi_complete().
6304 * only current cpu owns and manipulates this napi,
6305 * and NAPI_STATE_SCHED is the only possible flag set
6307 * We can use a plain write instead of clear_bit(),
6308 * and we dont need an smp_mb() memory barrier.
6310 napi
->state
&= NAPIF_STATE_THREADED
;
6313 local_lock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6314 skb_queue_splice_tail_init(&sd
->input_pkt_queue
,
6315 &sd
->process_queue
);
6316 local_unlock_nested_bh(&softnet_data
.process_queue_bh_lock
);
6318 backlog_unlock_irq_enable(sd
);
6322 rps_input_queue_head_add(sd
, work
);
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
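/*
 * Editor's illustrative sketch, not part of the upstream file: the usual
 * driver pattern around NAPI scheduling - the hard interrupt handler masks
 * further device interrupts and schedules NAPI, and the real work happens in
 * the poll callback.  example_irq(), struct example_queue and the
 * commented-out example_mask_irqs() hook are hypothetical.
 */
struct example_queue {
	struct napi_struct	napi;
	/* device-specific ring state would live here */
};

static irqreturn_t __maybe_unused example_irq(int irq, void *data)
{
	struct example_queue *eq = data;

	/* example_mask_irqs(eq); -- hypothetical: stop further device irqs */
	if (napi_schedule_prep(&eq->napi))
		__napi_schedule(&eq->napi);

	return IRQ_HANDLED;
}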
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	do {
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
 * because the interrupt disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
	else
		__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
6394 bool napi_complete_done(struct napi_struct
*n
, int work_done
)
6396 unsigned long flags
, val
, new, timeout
= 0;
6400 * 1) Don't let napi dequeue from the cpu poll list
6401 * just in case its running on a different cpu.
6402 * 2) If we are busy polling, do nothing here, we have
6403 * the guarantee we will be called later.
6405 if (unlikely(n
->state
& (NAPIF_STATE_NPSVC
|
6406 NAPIF_STATE_IN_BUSY_POLL
)))
6411 timeout
= napi_get_gro_flush_timeout(n
);
6412 n
->defer_hard_irqs_count
= napi_get_defer_hard_irqs(n
);
6414 if (n
->defer_hard_irqs_count
> 0) {
6415 n
->defer_hard_irqs_count
--;
6416 timeout
= napi_get_gro_flush_timeout(n
);
6420 if (n
->gro_bitmask
) {
6421 /* When the NAPI instance uses a timeout and keeps postponing
6422 * it, we need to bound somehow the time packets are kept in
6425 napi_gro_flush(n
, !!timeout
);
6430 if (unlikely(!list_empty(&n
->poll_list
))) {
6431 /* If n->poll_list is not empty, we need to mask irqs */
6432 local_irq_save(flags
);
6433 list_del_init(&n
->poll_list
);
6434 local_irq_restore(flags
);
6436 WRITE_ONCE(n
->list_owner
, -1);
6438 val
= READ_ONCE(n
->state
);
6440 WARN_ON_ONCE(!(val
& NAPIF_STATE_SCHED
));
6442 new = val
& ~(NAPIF_STATE_MISSED
| NAPIF_STATE_SCHED
|
6443 NAPIF_STATE_SCHED_THREADED
|
6444 NAPIF_STATE_PREFER_BUSY_POLL
);
6446 /* If STATE_MISSED was set, leave STATE_SCHED set,
6447 * because we will call napi->poll() one more time.
6448 * This C code was suggested by Alexander Duyck to help gcc.
6450 new |= (val
& NAPIF_STATE_MISSED
) / NAPIF_STATE_MISSED
*
6452 } while (!try_cmpxchg(&n
->state
, &val
, new));
6454 if (unlikely(val
& NAPIF_STATE_MISSED
)) {
6460 hrtimer_start(&n
->timer
, ns_to_ktime(timeout
),
6461 HRTIMER_MODE_REL_PINNED
);
6464 EXPORT_SYMBOL(napi_complete_done
);
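/*
 * Editor's illustrative sketch, not part of the upstream file: the canonical
 * shape of a driver poll callback that cooperates with napi_complete_done().
 * example_napi_poll() is hypothetical, as are the commented-out
 * example_clean_rx()/example_unmask_irqs() hooks; the budget/completion
 * handshake is the part being illustrated.
 */
static int __maybe_unused example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* work_done = example_clean_rx(napi, budget); -- hypothetical */

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* NAPI really stopped, so device interrupts may be re-enabled.
		 * napi_complete_done() returns false when polling continues,
		 * e.g. while busy polling owns the instance.
		 *
		 * example_unmask_irqs(napi); -- hypothetical
		 */
	}

	return work_done;
}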
static void skb_defer_free_flush(struct softnet_data *sd)
{
	struct sk_buff *skb, *next;

	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
	if (!READ_ONCE(sd->defer_list))
		return;

	spin_lock(&sd->defer_lock);
	skb = sd->defer_list;
	sd->defer_list = NULL;
	sd->defer_count = 0;
	spin_unlock(&sd->defer_lock);

	while (skb != NULL) {
		next = skb->next;
		napi_consume_skb(skb, 1);
		skb = next;
	}
}
6487 #if defined(CONFIG_NET_RX_BUSY_POLL)
6489 static void __busy_poll_stop(struct napi_struct
*napi
, bool skip_schedule
)
6491 if (!skip_schedule
) {
6492 gro_normal_list(napi
);
6493 __napi_schedule(napi
);
6497 if (napi
->gro_bitmask
) {
6498 /* flush too old packets
6499 * If HZ < 1000, flush all packets.
6501 napi_gro_flush(napi
, HZ
>= 1000);
6504 gro_normal_list(napi
);
6505 clear_bit(NAPI_STATE_SCHED
, &napi
->state
);
6509 NAPI_F_PREFER_BUSY_POLL
= 1,
6510 NAPI_F_END_ON_RESCHED
= 2,
6513 static void busy_poll_stop(struct napi_struct
*napi
, void *have_poll_lock
,
6514 unsigned flags
, u16 budget
)
6516 struct bpf_net_context __bpf_net_ctx
, *bpf_net_ctx
;
6517 bool skip_schedule
= false;
6518 unsigned long timeout
;
6521 /* Busy polling means there is a high chance device driver hard irq
6522 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6523 * set in napi_schedule_prep().
6524 * Since we are about to call napi->poll() once more, we can safely
6525 * clear NAPI_STATE_MISSED.
6527 * Note: x86 could use a single "lock and ..." instruction
6528 * to perform these two clear_bit()
6530 clear_bit(NAPI_STATE_MISSED
, &napi
->state
);
6531 clear_bit(NAPI_STATE_IN_BUSY_POLL
, &napi
->state
);
6534 bpf_net_ctx
= bpf_net_ctx_set(&__bpf_net_ctx
);
6536 if (flags
& NAPI_F_PREFER_BUSY_POLL
) {
6537 napi
->defer_hard_irqs_count
= napi_get_defer_hard_irqs(napi
);
6538 timeout
= napi_get_gro_flush_timeout(napi
);
6539 if (napi
->defer_hard_irqs_count
&& timeout
) {
6540 hrtimer_start(&napi
->timer
, ns_to_ktime(timeout
), HRTIMER_MODE_REL_PINNED
);
6541 skip_schedule
= true;
6545 /* All we really want here is to re-enable device interrupts.
6546 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6548 rc
= napi
->poll(napi
, budget
);
6549 /* We can't gro_normal_list() here, because napi->poll() might have
6550 * rearmed the napi (napi_complete_done()) in which case it could
6551 * already be running on another CPU.
6553 trace_napi_poll(napi
, rc
, budget
);
6554 netpoll_poll_unlock(have_poll_lock
);
6556 __busy_poll_stop(napi
, skip_schedule
);
6557 bpf_net_ctx_clear(bpf_net_ctx
);
6561 static void __napi_busy_loop(unsigned int napi_id
,
6562 bool (*loop_end
)(void *, unsigned long),
6563 void *loop_end_arg
, unsigned flags
, u16 budget
)
6565 unsigned long start_time
= loop_end
? busy_loop_current_time() : 0;
6566 int (*napi_poll
)(struct napi_struct
*napi
, int budget
);
6567 struct bpf_net_context __bpf_net_ctx
, *bpf_net_ctx
;
6568 void *have_poll_lock
= NULL
;
6569 struct napi_struct
*napi
;
6571 WARN_ON_ONCE(!rcu_read_lock_held());
6576 napi
= napi_by_id(napi_id
);
6580 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
6586 bpf_net_ctx
= bpf_net_ctx_set(&__bpf_net_ctx
);
6588 unsigned long val
= READ_ONCE(napi
->state
);
6590 /* If multiple threads are competing for this napi,
6591 * we avoid dirtying napi->state as much as we can.
6593 if (val
& (NAPIF_STATE_DISABLE
| NAPIF_STATE_SCHED
|
6594 NAPIF_STATE_IN_BUSY_POLL
)) {
6595 if (flags
& NAPI_F_PREFER_BUSY_POLL
)
6596 set_bit(NAPI_STATE_PREFER_BUSY_POLL
, &napi
->state
);
6599 if (cmpxchg(&napi
->state
, val
,
6600 val
| NAPIF_STATE_IN_BUSY_POLL
|
6601 NAPIF_STATE_SCHED
) != val
) {
6602 if (flags
& NAPI_F_PREFER_BUSY_POLL
)
6603 set_bit(NAPI_STATE_PREFER_BUSY_POLL
, &napi
->state
);
6606 have_poll_lock
= netpoll_poll_lock(napi
);
6607 napi_poll
= napi
->poll
;
6609 work
= napi_poll(napi
, budget
);
6610 trace_napi_poll(napi
, work
, budget
);
6611 gro_normal_list(napi
);
6614 __NET_ADD_STATS(dev_net(napi
->dev
),
6615 LINUX_MIB_BUSYPOLLRXPACKETS
, work
);
6616 skb_defer_free_flush(this_cpu_ptr(&softnet_data
));
6617 bpf_net_ctx_clear(bpf_net_ctx
);
6620 if (!loop_end
|| loop_end(loop_end_arg
, start_time
))
6623 if (unlikely(need_resched())) {
6624 if (flags
& NAPI_F_END_ON_RESCHED
)
6627 busy_poll_stop(napi
, have_poll_lock
, flags
, budget
);
6628 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
6633 if (loop_end(loop_end_arg
, start_time
))
6640 busy_poll_stop(napi
, have_poll_lock
, flags
, budget
);
6641 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
6645 void napi_busy_loop_rcu(unsigned int napi_id
,
6646 bool (*loop_end
)(void *, unsigned long),
6647 void *loop_end_arg
, bool prefer_busy_poll
, u16 budget
)
6649 unsigned flags
= NAPI_F_END_ON_RESCHED
;
6651 if (prefer_busy_poll
)
6652 flags
|= NAPI_F_PREFER_BUSY_POLL
;
6654 __napi_busy_loop(napi_id
, loop_end
, loop_end_arg
, flags
, budget
);
6657 void napi_busy_loop(unsigned int napi_id
,
6658 bool (*loop_end
)(void *, unsigned long),
6659 void *loop_end_arg
, bool prefer_busy_poll
, u16 budget
)
6661 unsigned flags
= prefer_busy_poll
? NAPI_F_PREFER_BUSY_POLL
: 0;
6664 __napi_busy_loop(napi_id
, loop_end
, loop_end_arg
, flags
, budget
);
6667 EXPORT_SYMBOL(napi_busy_loop
);
6669 void napi_suspend_irqs(unsigned int napi_id
)
6671 struct napi_struct
*napi
;
6674 napi
= napi_by_id(napi_id
);
6676 unsigned long timeout
= napi_get_irq_suspend_timeout(napi
);
6679 hrtimer_start(&napi
->timer
, ns_to_ktime(timeout
),
6680 HRTIMER_MODE_REL_PINNED
);
6685 void napi_resume_irqs(unsigned int napi_id
)
6687 struct napi_struct
*napi
;
6690 napi
= napi_by_id(napi_id
);
6692 /* If irq_suspend_timeout is set to 0 between the call to
6693 * napi_suspend_irqs and now, the original value still
6694 * determines the safety timeout as intended and napi_watchdog
6695 * will resume irq processing.
6697 if (napi_get_irq_suspend_timeout(napi
)) {
6699 napi_schedule(napi
);
6706 #endif /* CONFIG_NET_RX_BUSY_POLL */
6708 static void __napi_hash_add_with_id(struct napi_struct
*napi
,
6709 unsigned int napi_id
)
6711 napi
->napi_id
= napi_id
;
6712 hlist_add_head_rcu(&napi
->napi_hash_node
,
6713 &napi_hash
[napi
->napi_id
% HASH_SIZE(napi_hash
)]);
6716 static void napi_hash_add_with_id(struct napi_struct
*napi
,
6717 unsigned int napi_id
)
6719 unsigned long flags
;
6721 spin_lock_irqsave(&napi_hash_lock
, flags
);
6722 WARN_ON_ONCE(napi_by_id(napi_id
));
6723 __napi_hash_add_with_id(napi
, napi_id
);
6724 spin_unlock_irqrestore(&napi_hash_lock
, flags
);
6727 static void napi_hash_add(struct napi_struct
*napi
)
6729 unsigned long flags
;
6731 if (test_bit(NAPI_STATE_NO_BUSY_POLL
, &napi
->state
))
6734 spin_lock_irqsave(&napi_hash_lock
, flags
);
6736 /* 0..NR_CPUS range is reserved for sender_cpu use */
6738 if (unlikely(++napi_gen_id
< MIN_NAPI_ID
))
6739 napi_gen_id
= MIN_NAPI_ID
;
6740 } while (napi_by_id(napi_gen_id
));
6742 __napi_hash_add_with_id(napi
, napi_gen_id
);
6744 spin_unlock_irqrestore(&napi_hash_lock
, flags
);
6747 /* Warning : caller is responsible to make sure rcu grace period
6748 * is respected before freeing memory containing @napi
6750 static void napi_hash_del(struct napi_struct
*napi
)
6752 unsigned long flags
;
6754 spin_lock_irqsave(&napi_hash_lock
, flags
);
6756 hlist_del_init_rcu(&napi
->napi_hash_node
);
6758 spin_unlock_irqrestore(&napi_hash_lock
, flags
);
6761 static enum hrtimer_restart
napi_watchdog(struct hrtimer
*timer
)
6763 struct napi_struct
*napi
;
6765 napi
= container_of(timer
, struct napi_struct
, timer
);
6767 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6768 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6770 if (!napi_disable_pending(napi
) &&
6771 !test_and_set_bit(NAPI_STATE_SCHED
, &napi
->state
)) {
6772 clear_bit(NAPI_STATE_PREFER_BUSY_POLL
, &napi
->state
);
6773 __napi_schedule_irqoff(napi
);
6776 return HRTIMER_NORESTART
;
6779 static void init_gro_hash(struct napi_struct
*napi
)
6783 for (i
= 0; i
< GRO_HASH_BUCKETS
; i
++) {
6784 INIT_LIST_HEAD(&napi
->gro_hash
[i
].list
);
6785 napi
->gro_hash
[i
].count
= 0;
6787 napi
->gro_bitmask
= 0;
int dev_set_threaded(struct net_device *dev, bool threaded)
{
	struct napi_struct *napi;
	int err = 0;

	netdev_assert_locked_or_invisible(dev);

	if (dev->threaded == threaded)
		return 0;

	if (threaded) {
		list_for_each_entry(napi, &dev->napi_list, dev_list) {
			if (!napi->thread) {
				err = napi_kthread_create(napi);
				if (err) {
					threaded = false;
					break;
				}
			}
		}
	}

	WRITE_ONCE(dev->threaded, threaded);

	/* Make sure kthread is created before THREADED bit
	 * is set.
	 */
	smp_mb__before_atomic();

	/* Setting/unsetting threaded mode on a napi might not immediately
	 * take effect, if the current napi instance is actively being
	 * polled. In this case, the switch between threaded mode and
	 * softirq mode will happen in the next round of napi_schedule().
	 * This should not cause hiccups/stalls to the live traffic.
	 */
	list_for_each_entry(napi, &dev->napi_list, dev_list)
		assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);

	return err;
}
EXPORT_SYMBOL(dev_set_threaded);
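/*
 * Editor's illustrative sketch, not part of the upstream file: switching a
 * device's NAPI instances to threaded mode from code, as an alternative to
 * writing 1 to /sys/class/net/<iface>/threaded.  example_enable_threaded_napi()
 * is hypothetical; dev_set_threaded() is subject to the locking rules asserted
 * above and returns 0 or a negative errno.
 */
static int __maybe_unused example_enable_threaded_napi(struct net_device *dev)
{
	int err;

	err = dev_set_threaded(dev, true);
	if (err)
		netdev_warn(dev, "could not enable threaded NAPI: %d\n", err);
	return err;
}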
/**
 * netif_queue_set_napi - Associate queue with the napi
 * @dev: device to which NAPI and queue belong
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 * @napi: NAPI context, pass NULL to clear previously set NAPI
 *
 * Set queue with its corresponding napi context. This should be done after
 * registering the NAPI handler for the queue-vector and the queues have been
 * mapped to the corresponding interrupt vector.
 */
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
			  enum netdev_queue_type type, struct napi_struct *napi)
{
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;

	if (WARN_ON_ONCE(napi && !napi->dev))
		return;
	if (dev->reg_state >= NETREG_REGISTERED)
		ASSERT_RTNL();

	switch (type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(dev, queue_index);
		rxq->napi = napi;
		return;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(dev, queue_index);
		txq->napi = napi;
		return;
	default:
		return;
	}
}
EXPORT_SYMBOL(netif_queue_set_napi);
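/*
 * Editor's illustrative sketch, not part of the upstream file: tying an RX
 * and a TX queue to their NAPI context right after the NAPI handler has been
 * registered, so the mapping is visible via the netdev netlink API.
 * example_setup_queue_pair() is hypothetical; example_poll is whatever poll
 * callback the driver provides.
 */
static void __maybe_unused
example_setup_queue_pair(struct net_device *dev, struct napi_struct *napi,
			 unsigned int idx,
			 int (*example_poll)(struct napi_struct *, int))
{
	netif_napi_add(dev, napi, example_poll);

	netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, napi);
	netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_TX, napi);
}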
6869 static void napi_restore_config(struct napi_struct
*n
)
6871 n
->defer_hard_irqs
= n
->config
->defer_hard_irqs
;
6872 n
->gro_flush_timeout
= n
->config
->gro_flush_timeout
;
6873 n
->irq_suspend_timeout
= n
->config
->irq_suspend_timeout
;
6874 /* a NAPI ID might be stored in the config, if so use it. if not, use
6875 * napi_hash_add to generate one for us.
6877 if (n
->config
->napi_id
) {
6878 napi_hash_add_with_id(n
, n
->config
->napi_id
);
6881 n
->config
->napi_id
= n
->napi_id
;
6885 static void napi_save_config(struct napi_struct
*n
)
6887 n
->config
->defer_hard_irqs
= n
->defer_hard_irqs
;
6888 n
->config
->gro_flush_timeout
= n
->gro_flush_timeout
;
6889 n
->config
->irq_suspend_timeout
= n
->irq_suspend_timeout
;
/* Netlink wants the NAPI list to be sorted by ID, if adding a NAPI which will
 * inherit an existing ID try to insert it at the right position.
 */
static void
netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
{
	unsigned int new_id, pos_id;
	struct list_head *higher;
	struct napi_struct *pos;

	new_id = UINT_MAX;
	if (napi->config && napi->config->napi_id)
		new_id = napi->config->napi_id;

	higher = &dev->napi_list;
	list_for_each_entry(pos, &dev->napi_list, dev_list) {
		if (pos->napi_id >= MIN_NAPI_ID)
			pos_id = pos->napi_id;
		else if (pos->config)
			pos_id = pos->config->napi_id;
		else
			pos_id = UINT_MAX;

		if (pos_id <= new_id)
			break;
		higher = &pos->dev_list;
	}
	list_add_rcu(&napi->dev_list, higher); /* adds after higher */
}
6923 void netif_napi_add_weight_locked(struct net_device
*dev
,
6924 struct napi_struct
*napi
,
6925 int (*poll
)(struct napi_struct
*, int),
6928 netdev_assert_locked(dev
);
6929 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED
, &napi
->state
)))
6932 INIT_LIST_HEAD(&napi
->poll_list
);
6933 INIT_HLIST_NODE(&napi
->napi_hash_node
);
6934 hrtimer_init(&napi
->timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL_PINNED
);
6935 napi
->timer
.function
= napi_watchdog
;
6936 init_gro_hash(napi
);
6938 INIT_LIST_HEAD(&napi
->rx_list
);
6941 if (weight
> NAPI_POLL_WEIGHT
)
6942 netdev_err_once(dev
, "%s() called with weight %d\n", __func__
,
6944 napi
->weight
= weight
;
6946 #ifdef CONFIG_NETPOLL
6947 napi
->poll_owner
= -1;
6949 napi
->list_owner
= -1;
6950 set_bit(NAPI_STATE_SCHED
, &napi
->state
);
6951 set_bit(NAPI_STATE_NPSVC
, &napi
->state
);
6952 netif_napi_dev_list_add(dev
, napi
);
6954 /* default settings from sysfs are applied to all NAPIs. any per-NAPI
6955 * configuration will be loaded in napi_enable
6957 napi_set_defer_hard_irqs(napi
, READ_ONCE(dev
->napi_defer_hard_irqs
));
6958 napi_set_gro_flush_timeout(napi
, READ_ONCE(dev
->gro_flush_timeout
));
6960 napi_get_frags_check(napi
);
6961 /* Create kthread for this napi if dev->threaded is set.
6962 * Clear dev->threaded if kthread creation failed so that
6963 * threaded mode will not be enabled in napi_enable().
6965 if (dev
->threaded
&& napi_kthread_create(napi
))
6966 dev
->threaded
= false;
6967 netif_napi_set_irq_locked(napi
, -1);
6969 EXPORT_SYMBOL(netif_napi_add_weight_locked
);
void napi_disable_locked(struct napi_struct *n)
{
	unsigned long val, new;

	might_sleep();
	netdev_assert_locked(n->dev);

	set_bit(NAPI_STATE_DISABLE, &n->state);

	val = READ_ONCE(n->state);
	do {
		while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
			usleep_range(20, 200);
			val = READ_ONCE(n->state);
		}

		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
	} while (!try_cmpxchg(&n->state, &val, new));

	hrtimer_cancel(&n->timer);

	if (n->config)
		napi_save_config(n);
	else
		napi_hash_del(n);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable_locked);
/**
 * napi_disable() - prevent NAPI from scheduling
 * @n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 * Takes netdev_lock() for associated net_device.
 */
void napi_disable(struct napi_struct *n)
{
	netdev_lock(n->dev);
	napi_disable_locked(n);
	netdev_unlock(n->dev);
}
EXPORT_SYMBOL(napi_disable);
void napi_enable_locked(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	if (n->config)
		napi_restore_config(n);
	else
		napi_hash_add(n);

	do {
		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));

		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
		if (n->dev->threaded && n->thread)
			new |= NAPIF_STATE_THREADED;
	} while (!try_cmpxchg(&n->state, &val, new));
}
EXPORT_SYMBOL(napi_enable_locked);

/**
 * napi_enable() - enable NAPI scheduling
 * @n: NAPI context
 *
 * Enable scheduling of a NAPI instance.
 * Must be paired with napi_disable().
 * Takes netdev_lock() for associated net_device.
 */
void napi_enable(struct napi_struct *n)
{
	netdev_lock(n->dev);
	napi_enable_locked(n);
	netdev_unlock(n->dev);
}
EXPORT_SYMBOL(napi_enable);
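
/* Illustrative sketch (editor's addition, not part of the original source):
 * the usual napi_disable()/napi_enable() pairing around a ring
 * reconfiguration in a hypothetical driver. Both helpers take netdev_lock()
 * internally, so the caller must not already hold it (otherwise the _locked
 * variants apply). Kept under #if 0.
 */
#if 0
static void foo_reconfigure_ring(struct napi_struct *napi)
{
	napi_disable(napi);	/* waits for any in-flight poll to finish */
	/* ... resize and repost RX descriptors here ... */
	napi_enable(napi);	/* polling may be scheduled again */
}
#endif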
static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}
7066 /* Must be called in process context */
7067 void __netif_napi_del_locked(struct napi_struct
*napi
)
7069 netdev_assert_locked(napi
->dev
);
7071 if (!test_and_clear_bit(NAPI_STATE_LISTED
, &napi
->state
))
7076 napi
->config
= NULL
;
7079 list_del_rcu(&napi
->dev_list
);
7080 napi_free_frags(napi
);
7082 flush_gro_hash(napi
);
7083 napi
->gro_bitmask
= 0;
7086 kthread_stop(napi
->thread
);
7087 napi
->thread
= NULL
;
7090 EXPORT_SYMBOL(__netif_napi_del_locked
);
7092 static int __napi_poll(struct napi_struct
*n
, bool *repoll
)
7098 /* This NAPI_STATE_SCHED test is for avoiding a race
7099 * with netpoll's poll_napi(). Only the entity which
7100 * obtains the lock and sees NAPI_STATE_SCHED set will
7101 * actually make the ->poll() call. Therefore we avoid
7102 * accidentally calling ->poll() when NAPI is not scheduled.
7105 if (napi_is_scheduled(n
)) {
7106 work
= n
->poll(n
, weight
);
7107 trace_napi_poll(n
, work
, weight
);
7109 xdp_do_check_flushed(n
);
7112 if (unlikely(work
> weight
))
7113 netdev_err_once(n
->dev
, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
7114 n
->poll
, work
, weight
);
7116 if (likely(work
< weight
))
7119 /* Drivers must not modify the NAPI state if they
7120 * consume the entire weight. In such cases this code
7121 * still "owns" the NAPI instance and therefore can
7122 * move the instance around on the list at-will.
7124 if (unlikely(napi_disable_pending(n
))) {
7129 /* The NAPI context has more processing work, but busy-polling
7130 * is preferred. Exit early.
7132 if (napi_prefer_busy_poll(n
)) {
7133 if (napi_complete_done(n
, work
)) {
7134 /* If timeout is not set, we need to make sure
7135 * that the NAPI is re-scheduled.
7142 if (n
->gro_bitmask
) {
7143 /* flush too old packets
7144 * If HZ < 1000, flush all packets.
7146 napi_gro_flush(n
, HZ
>= 1000);
7151 /* Some drivers may have called napi_schedule
7152 * prior to exhausting their budget.
7154 if (unlikely(!list_empty(&n
->poll_list
))) {
7155 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
7156 n
->dev
? n
->dev
->name
: "backlog");
7165 static int napi_poll(struct napi_struct
*n
, struct list_head
*repoll
)
7167 bool do_repoll
= false;
7171 list_del_init(&n
->poll_list
);
7173 have
= netpoll_poll_lock(n
);
7175 work
= __napi_poll(n
, &do_repoll
);
7178 list_add_tail(&n
->poll_list
, repoll
);
7180 netpoll_poll_unlock(have
);
static int napi_thread_wait(struct napi_struct *napi)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		/* Testing SCHED_THREADED bit here to make sure the current
		 * kthread owns this napi and could poll on this napi.
		 * Testing SCHED bit is not enough because SCHED bit might be
		 * set by some other busy poll thread or by napi_disable().
		 */
		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
			WARN_ON(!list_empty(&napi->poll_list));
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}
7209 static void napi_threaded_poll_loop(struct napi_struct
*napi
)
7211 struct bpf_net_context __bpf_net_ctx
, *bpf_net_ctx
;
7212 struct softnet_data
*sd
;
7213 unsigned long last_qs
= jiffies
;
7216 bool repoll
= false;
7220 bpf_net_ctx
= bpf_net_ctx_set(&__bpf_net_ctx
);
7222 sd
= this_cpu_ptr(&softnet_data
);
7223 sd
->in_napi_threaded_poll
= true;
7225 have
= netpoll_poll_lock(napi
);
7226 __napi_poll(napi
, &repoll
);
7227 netpoll_poll_unlock(have
);
7229 sd
->in_napi_threaded_poll
= false;
7232 if (sd_has_rps_ipi_waiting(sd
)) {
7233 local_irq_disable();
7234 net_rps_action_and_irq_enable(sd
);
7236 skb_defer_free_flush(sd
);
7237 bpf_net_ctx_clear(bpf_net_ctx
);
7243 rcu_softirq_qs_periodic(last_qs
);
7248 static int napi_threaded_poll(void *data
)
7250 struct napi_struct
*napi
= data
;
7252 while (!napi_thread_wait(napi
))
7253 napi_threaded_poll_loop(napi
);
7258 static __latent_entropy
void net_rx_action(void)
7260 struct softnet_data
*sd
= this_cpu_ptr(&softnet_data
);
7261 unsigned long time_limit
= jiffies
+
7262 usecs_to_jiffies(READ_ONCE(net_hotdata
.netdev_budget_usecs
));
7263 struct bpf_net_context __bpf_net_ctx
, *bpf_net_ctx
;
7264 int budget
= READ_ONCE(net_hotdata
.netdev_budget
);
7268 bpf_net_ctx
= bpf_net_ctx_set(&__bpf_net_ctx
);
7270 sd
->in_net_rx_action
= true;
7271 local_irq_disable();
7272 list_splice_init(&sd
->poll_list
, &list
);
7276 struct napi_struct
*n
;
7278 skb_defer_free_flush(sd
);
7280 if (list_empty(&list
)) {
7281 if (list_empty(&repoll
)) {
7282 sd
->in_net_rx_action
= false;
7284 /* We need to check if ____napi_schedule()
7285 * had refilled poll_list while
7286 * sd->in_net_rx_action was true.
7288 if (!list_empty(&sd
->poll_list
))
7290 if (!sd_has_rps_ipi_waiting(sd
))
7296 n
= list_first_entry(&list
, struct napi_struct
, poll_list
);
7297 budget
-= napi_poll(n
, &repoll
);
7299 /* If softirq window is exhausted then punt.
7300 * Allow this to run for 2 jiffies since which will allow
7301 * an average latency of 1.5/HZ.
7303 if (unlikely(budget
<= 0 ||
7304 time_after_eq(jiffies
, time_limit
))) {
7310 local_irq_disable();
7312 list_splice_tail_init(&sd
->poll_list
, &list
);
7313 list_splice_tail(&repoll
, &list
);
7314 list_splice(&list
, &sd
->poll_list
);
7315 if (!list_empty(&sd
->poll_list
))
7316 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
7318 sd
->in_net_rx_action
= false;
7320 net_rps_action_and_irq_enable(sd
);
7322 bpf_net_ctx_clear(bpf_net_ctx
);
7325 struct netdev_adjacent
{
7326 struct net_device
*dev
;
7327 netdevice_tracker dev_tracker
;
7329 /* upper master flag, there can only be one master device per list */
7332 /* lookup ignore flag */
7335 /* counter for the number of times this device was added to us */
7338 /* private field for the users */
7341 struct list_head list
;
7342 struct rcu_head rcu
;
7345 static struct netdev_adjacent
*__netdev_find_adj(struct net_device
*adj_dev
,
7346 struct list_head
*adj_list
)
7348 struct netdev_adjacent
*adj
;
7350 list_for_each_entry(adj
, adj_list
, list
) {
7351 if (adj
->dev
== adj_dev
)
7357 static int ____netdev_has_upper_dev(struct net_device
*upper_dev
,
7358 struct netdev_nested_priv
*priv
)
7360 struct net_device
*dev
= (struct net_device
*)priv
->data
;
7362 return upper_dev
== dev
;
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					     &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
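
/* Illustrative sketch (editor's addition, not part of the original source):
 * checking under RTNL whether a device already has the given device among
 * its uppers before attempting to stack it again. Names are assumptions for
 * the example; kept under #if 0.
 */
#if 0
static bool foo_already_stacked(struct net_device *port,
				struct net_device *master)
{
	bool linked;

	rtnl_lock();
	linked = netdev_has_upper_dev(port, master);
	rtnl_unlock();
	return linked;
}
#endif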
7389 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
7391 * @upper_dev: upper device to check
7393 * Find out if a device is linked to specified upper device and return true
7394 * in case it is. Note that this checks the entire upper device chain.
7395 * The caller must hold rcu lock.
7398 bool netdev_has_upper_dev_all_rcu(struct net_device
*dev
,
7399 struct net_device
*upper_dev
)
7401 struct netdev_nested_priv priv
= {
7402 .data
= (void *)upper_dev
,
7405 return !!netdev_walk_all_upper_dev_rcu(dev
, ____netdev_has_upper_dev
,
7408 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu
);
7411 * netdev_has_any_upper_dev - Check if device is linked to some device
7414 * Find out if a device is linked to an upper device and return true in case
7415 * it is. The caller must hold the RTNL lock.
7417 bool netdev_has_any_upper_dev(struct net_device
*dev
)
7421 return !list_empty(&dev
->adj_list
.upper
);
7423 EXPORT_SYMBOL(netdev_has_any_upper_dev
);
7426 * netdev_master_upper_dev_get - Get master upper device
7429 * Find a master upper device and return pointer to it or NULL in case
7430 * it's not there. The caller must hold the RTNL lock.
7432 struct net_device
*netdev_master_upper_dev_get(struct net_device
*dev
)
7434 struct netdev_adjacent
*upper
;
7438 if (list_empty(&dev
->adj_list
.upper
))
7441 upper
= list_first_entry(&dev
->adj_list
.upper
,
7442 struct netdev_adjacent
, list
);
7443 if (likely(upper
->master
))
7447 EXPORT_SYMBOL(netdev_master_upper_dev_get
);
7449 static struct net_device
*__netdev_master_upper_dev_get(struct net_device
*dev
)
7451 struct netdev_adjacent
*upper
;
7455 if (list_empty(&dev
->adj_list
.upper
))
7458 upper
= list_first_entry(&dev
->adj_list
.upper
,
7459 struct netdev_adjacent
, list
);
7460 if (likely(upper
->master
) && !upper
->ignore
)
7466 * netdev_has_any_lower_dev - Check if device is linked to some device
7469 * Find out if a device is linked to a lower device and return true in case
7470 * it is. The caller must hold the RTNL lock.
7472 static bool netdev_has_any_lower_dev(struct net_device
*dev
)
7476 return !list_empty(&dev
->adj_list
.lower
);
7479 void *netdev_adjacent_get_private(struct list_head
*adj_list
)
7481 struct netdev_adjacent
*adj
;
7483 adj
= list_entry(adj_list
, struct netdev_adjacent
, list
);
7485 return adj
->private;
7487 EXPORT_SYMBOL(netdev_adjacent_get_private
);
7490 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7492 * @iter: list_head ** of the current position
7494 * Gets the next device from the dev's upper list, starting from iter
7495 * position. The caller must hold RCU read lock.
7497 struct net_device
*netdev_upper_get_next_dev_rcu(struct net_device
*dev
,
7498 struct list_head
**iter
)
7500 struct netdev_adjacent
*upper
;
7502 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7504 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7506 if (&upper
->list
== &dev
->adj_list
.upper
)
7509 *iter
= &upper
->list
;
7513 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu
);
7515 static struct net_device
*__netdev_next_upper_dev(struct net_device
*dev
,
7516 struct list_head
**iter
,
7519 struct netdev_adjacent
*upper
;
7521 upper
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7523 if (&upper
->list
== &dev
->adj_list
.upper
)
7526 *iter
= &upper
->list
;
7527 *ignore
= upper
->ignore
;
7532 static struct net_device
*netdev_next_upper_dev_rcu(struct net_device
*dev
,
7533 struct list_head
**iter
)
7535 struct netdev_adjacent
*upper
;
7537 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7539 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7541 if (&upper
->list
== &dev
->adj_list
.upper
)
7544 *iter
= &upper
->list
;
7549 static int __netdev_walk_all_upper_dev(struct net_device
*dev
,
7550 int (*fn
)(struct net_device
*dev
,
7551 struct netdev_nested_priv
*priv
),
7552 struct netdev_nested_priv
*priv
)
7554 struct net_device
*udev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7555 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7560 iter
= &dev
->adj_list
.upper
;
7564 ret
= fn(now
, priv
);
7571 udev
= __netdev_next_upper_dev(now
, &iter
, &ignore
);
7578 niter
= &udev
->adj_list
.upper
;
7579 dev_stack
[cur
] = now
;
7580 iter_stack
[cur
++] = iter
;
7587 next
= dev_stack
[--cur
];
7588 niter
= iter_stack
[cur
];
7598 int netdev_walk_all_upper_dev_rcu(struct net_device
*dev
,
7599 int (*fn
)(struct net_device
*dev
,
7600 struct netdev_nested_priv
*priv
),
7601 struct netdev_nested_priv
*priv
)
7603 struct net_device
*udev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7604 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7608 iter
= &dev
->adj_list
.upper
;
7612 ret
= fn(now
, priv
);
7619 udev
= netdev_next_upper_dev_rcu(now
, &iter
);
7624 niter
= &udev
->adj_list
.upper
;
7625 dev_stack
[cur
] = now
;
7626 iter_stack
[cur
++] = iter
;
7633 next
= dev_stack
[--cur
];
7634 niter
= iter_stack
[cur
];
7643 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu
);
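
/* Illustrative sketch (editor's addition, not part of the original source):
 * counting upper devices with netdev_walk_all_upper_dev_rcu(). The callback
 * returns 0 to keep walking; a non-zero return stops the walk and is
 * propagated to the caller. Names are assumptions; kept under #if 0.
 */
#if 0
static int foo_count_upper(struct net_device *upper,
			   struct netdev_nested_priv *priv)
{
	unsigned int *count = (unsigned int *)priv->data;

	(*count)++;
	return 0;
}

static unsigned int foo_num_uppers(struct net_device *dev)
{
	unsigned int count = 0;
	struct netdev_nested_priv priv = {
		.data = (void *)&count,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, foo_count_upper, &priv);
	rcu_read_unlock();
	return count;
}
#endif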
7645 static bool __netdev_has_upper_dev(struct net_device
*dev
,
7646 struct net_device
*upper_dev
)
7648 struct netdev_nested_priv priv
= {
7650 .data
= (void *)upper_dev
,
7655 return __netdev_walk_all_upper_dev(dev
, ____netdev_has_upper_dev
,
7660 * netdev_lower_get_next_private - Get the next ->private from the
7661 * lower neighbour list
7663 * @iter: list_head ** of the current position
7665 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7666 * list, starting from iter position. The caller must hold either hold the
7667 * RTNL lock or its own locking that guarantees that the neighbour lower
7668 * list will remain unchanged.
7670 void *netdev_lower_get_next_private(struct net_device
*dev
,
7671 struct list_head
**iter
)
7673 struct netdev_adjacent
*lower
;
7675 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
7677 if (&lower
->list
== &dev
->adj_list
.lower
)
7680 *iter
= lower
->list
.next
;
7682 return lower
->private;
7684 EXPORT_SYMBOL(netdev_lower_get_next_private
);
7687 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7688 * lower neighbour list, RCU
7691 * @iter: list_head ** of the current position
7693 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7694 * list, starting from iter position. The caller must hold RCU read lock.
7696 void *netdev_lower_get_next_private_rcu(struct net_device
*dev
,
7697 struct list_head
**iter
)
7699 struct netdev_adjacent
*lower
;
7701 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7703 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7705 if (&lower
->list
== &dev
->adj_list
.lower
)
7708 *iter
= &lower
->list
;
7710 return lower
->private;
7712 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu
);
7715 * netdev_lower_get_next - Get the next device from the lower neighbour
7718 * @iter: list_head ** of the current position
7720 * Gets the next netdev_adjacent from the dev's lower neighbour
7721 * list, starting from iter position. The caller must hold RTNL lock or
7722 * its own locking that guarantees that the neighbour lower
7723 * list will remain unchanged.
7725 void *netdev_lower_get_next(struct net_device
*dev
, struct list_head
**iter
)
7727 struct netdev_adjacent
*lower
;
7729 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
7731 if (&lower
->list
== &dev
->adj_list
.lower
)
7734 *iter
= lower
->list
.next
;
7738 EXPORT_SYMBOL(netdev_lower_get_next
);
7740 static struct net_device
*netdev_next_lower_dev(struct net_device
*dev
,
7741 struct list_head
**iter
)
7743 struct netdev_adjacent
*lower
;
7745 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7747 if (&lower
->list
== &dev
->adj_list
.lower
)
7750 *iter
= &lower
->list
;
7755 static struct net_device
*__netdev_next_lower_dev(struct net_device
*dev
,
7756 struct list_head
**iter
,
7759 struct netdev_adjacent
*lower
;
7761 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7763 if (&lower
->list
== &dev
->adj_list
.lower
)
7766 *iter
= &lower
->list
;
7767 *ignore
= lower
->ignore
;
7772 int netdev_walk_all_lower_dev(struct net_device
*dev
,
7773 int (*fn
)(struct net_device
*dev
,
7774 struct netdev_nested_priv
*priv
),
7775 struct netdev_nested_priv
*priv
)
7777 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7778 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7782 iter
= &dev
->adj_list
.lower
;
7786 ret
= fn(now
, priv
);
7793 ldev
= netdev_next_lower_dev(now
, &iter
);
7798 niter
= &ldev
->adj_list
.lower
;
7799 dev_stack
[cur
] = now
;
7800 iter_stack
[cur
++] = iter
;
7807 next
= dev_stack
[--cur
];
7808 niter
= iter_stack
[cur
];
7817 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev
);
7819 static int __netdev_walk_all_lower_dev(struct net_device
*dev
,
7820 int (*fn
)(struct net_device
*dev
,
7821 struct netdev_nested_priv
*priv
),
7822 struct netdev_nested_priv
*priv
)
7824 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7825 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7830 iter
= &dev
->adj_list
.lower
;
7834 ret
= fn(now
, priv
);
7841 ldev
= __netdev_next_lower_dev(now
, &iter
, &ignore
);
7848 niter
= &ldev
->adj_list
.lower
;
7849 dev_stack
[cur
] = now
;
7850 iter_stack
[cur
++] = iter
;
7857 next
= dev_stack
[--cur
];
7858 niter
= iter_stack
[cur
];
7868 struct net_device
*netdev_next_lower_dev_rcu(struct net_device
*dev
,
7869 struct list_head
**iter
)
7871 struct netdev_adjacent
*lower
;
7873 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7874 if (&lower
->list
== &dev
->adj_list
.lower
)
7877 *iter
= &lower
->list
;
7881 EXPORT_SYMBOL(netdev_next_lower_dev_rcu
);
7883 static u8
__netdev_upper_depth(struct net_device
*dev
)
7885 struct net_device
*udev
;
7886 struct list_head
*iter
;
7890 for (iter
= &dev
->adj_list
.upper
,
7891 udev
= __netdev_next_upper_dev(dev
, &iter
, &ignore
);
7893 udev
= __netdev_next_upper_dev(dev
, &iter
, &ignore
)) {
7896 if (max_depth
< udev
->upper_level
)
7897 max_depth
= udev
->upper_level
;
7903 static u8
__netdev_lower_depth(struct net_device
*dev
)
7905 struct net_device
*ldev
;
7906 struct list_head
*iter
;
7910 for (iter
= &dev
->adj_list
.lower
,
7911 ldev
= __netdev_next_lower_dev(dev
, &iter
, &ignore
);
7913 ldev
= __netdev_next_lower_dev(dev
, &iter
, &ignore
)) {
7916 if (max_depth
< ldev
->lower_level
)
7917 max_depth
= ldev
->lower_level
;
7923 static int __netdev_update_upper_level(struct net_device
*dev
,
7924 struct netdev_nested_priv
*__unused
)
7926 dev
->upper_level
= __netdev_upper_depth(dev
) + 1;
7930 #ifdef CONFIG_LOCKDEP
7931 static LIST_HEAD(net_unlink_list
);
7933 static void net_unlink_todo(struct net_device
*dev
)
7935 if (list_empty(&dev
->unlink_list
))
7936 list_add_tail(&dev
->unlink_list
, &net_unlink_list
);
7940 static int __netdev_update_lower_level(struct net_device
*dev
,
7941 struct netdev_nested_priv
*priv
)
7943 dev
->lower_level
= __netdev_lower_depth(dev
) + 1;
7945 #ifdef CONFIG_LOCKDEP
7949 if (priv
->flags
& NESTED_SYNC_IMM
)
7950 dev
->nested_level
= dev
->lower_level
- 1;
7951 if (priv
->flags
& NESTED_SYNC_TODO
)
7952 net_unlink_todo(dev
);
7957 int netdev_walk_all_lower_dev_rcu(struct net_device
*dev
,
7958 int (*fn
)(struct net_device
*dev
,
7959 struct netdev_nested_priv
*priv
),
7960 struct netdev_nested_priv
*priv
)
7962 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7963 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7967 iter
= &dev
->adj_list
.lower
;
7971 ret
= fn(now
, priv
);
7978 ldev
= netdev_next_lower_dev_rcu(now
, &iter
);
7983 niter
= &ldev
->adj_list
.lower
;
7984 dev_stack
[cur
] = now
;
7985 iter_stack
[cur
++] = iter
;
7992 next
= dev_stack
[--cur
];
7993 niter
= iter_stack
[cur
];
8002 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu
);
8005 * netdev_lower_get_first_private_rcu - Get the first ->private from the
8006 * lower neighbour list, RCU
8010 * Gets the first netdev_adjacent->private from the dev's lower neighbour
8011 * list. The caller must hold RCU read lock.
8013 void *netdev_lower_get_first_private_rcu(struct net_device
*dev
)
8015 struct netdev_adjacent
*lower
;
8017 lower
= list_first_or_null_rcu(&dev
->adj_list
.lower
,
8018 struct netdev_adjacent
, list
);
8020 return lower
->private;
8023 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu
);
8026 * netdev_master_upper_dev_get_rcu - Get master upper device
8029 * Find a master upper device and return pointer to it or NULL in case
8030 * it's not there. The caller must hold the RCU read lock.
8032 struct net_device
*netdev_master_upper_dev_get_rcu(struct net_device
*dev
)
8034 struct netdev_adjacent
*upper
;
8036 upper
= list_first_or_null_rcu(&dev
->adj_list
.upper
,
8037 struct netdev_adjacent
, list
);
8038 if (upper
&& likely(upper
->master
))
8042 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu
);
8044 static int netdev_adjacent_sysfs_add(struct net_device
*dev
,
8045 struct net_device
*adj_dev
,
8046 struct list_head
*dev_list
)
8048 char linkname
[IFNAMSIZ
+7];
8050 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
8051 "upper_%s" : "lower_%s", adj_dev
->name
);
8052 return sysfs_create_link(&(dev
->dev
.kobj
), &(adj_dev
->dev
.kobj
),
8055 static void netdev_adjacent_sysfs_del(struct net_device
*dev
,
8057 struct list_head
*dev_list
)
8059 char linkname
[IFNAMSIZ
+7];
8061 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
8062 "upper_%s" : "lower_%s", name
);
8063 sysfs_remove_link(&(dev
->dev
.kobj
), linkname
);
8066 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
8067 struct net_device
*adj_dev
,
8068 struct list_head
*dev_list
)
8070 return (dev_list
== &dev
->adj_list
.upper
||
8071 dev_list
== &dev
->adj_list
.lower
) &&
8072 net_eq(dev_net(dev
), dev_net(adj_dev
));
8075 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
8076 struct net_device
*adj_dev
,
8077 struct list_head
*dev_list
,
8078 void *private, bool master
)
8080 struct netdev_adjacent
*adj
;
8083 adj
= __netdev_find_adj(adj_dev
, dev_list
);
8087 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
8088 dev
->name
, adj_dev
->name
, adj
->ref_nr
);
8093 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
8098 adj
->master
= master
;
8100 adj
->private = private;
8101 adj
->ignore
= false;
8102 netdev_hold(adj_dev
, &adj
->dev_tracker
, GFP_KERNEL
);
8104 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
8105 dev
->name
, adj_dev
->name
, adj
->ref_nr
, adj_dev
->name
);
8107 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
8108 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
8113 /* Ensure that master link is always the first item in list. */
8115 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
8116 &(adj_dev
->dev
.kobj
), "master");
8118 goto remove_symlinks
;
8120 list_add_rcu(&adj
->list
, dev_list
);
8122 list_add_tail_rcu(&adj
->list
, dev_list
);
8128 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
8129 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
8131 netdev_put(adj_dev
, &adj
->dev_tracker
);
8137 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
8138 struct net_device
*adj_dev
,
8140 struct list_head
*dev_list
)
8142 struct netdev_adjacent
*adj
;
8144 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
8145 dev
->name
, adj_dev
->name
, ref_nr
);
8147 adj
= __netdev_find_adj(adj_dev
, dev_list
);
8150 pr_err("Adjacency does not exist for device %s from %s\n",
8151 dev
->name
, adj_dev
->name
);
8156 if (adj
->ref_nr
> ref_nr
) {
8157 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
8158 dev
->name
, adj_dev
->name
, ref_nr
,
8159 adj
->ref_nr
- ref_nr
);
8160 adj
->ref_nr
-= ref_nr
;
8165 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
8167 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
8168 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
8170 list_del_rcu(&adj
->list
);
8171 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
8172 adj_dev
->name
, dev
->name
, adj_dev
->name
);
8173 netdev_put(adj_dev
, &adj
->dev_tracker
);
8174 kfree_rcu(adj
, rcu
);
8177 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
8178 struct net_device
*upper_dev
,
8179 struct list_head
*up_list
,
8180 struct list_head
*down_list
,
8181 void *private, bool master
)
8185 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
,
8190 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
,
8193 __netdev_adjacent_dev_remove(dev
, upper_dev
, 1, up_list
);
8200 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
8201 struct net_device
*upper_dev
,
8203 struct list_head
*up_list
,
8204 struct list_head
*down_list
)
8206 __netdev_adjacent_dev_remove(dev
, upper_dev
, ref_nr
, up_list
);
8207 __netdev_adjacent_dev_remove(upper_dev
, dev
, ref_nr
, down_list
);
8210 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
8211 struct net_device
*upper_dev
,
8212 void *private, bool master
)
8214 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
8215 &dev
->adj_list
.upper
,
8216 &upper_dev
->adj_list
.lower
,
8220 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
8221 struct net_device
*upper_dev
)
8223 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
, 1,
8224 &dev
->adj_list
.upper
,
8225 &upper_dev
->adj_list
.lower
);
8228 static int __netdev_upper_dev_link(struct net_device
*dev
,
8229 struct net_device
*upper_dev
, bool master
,
8230 void *upper_priv
, void *upper_info
,
8231 struct netdev_nested_priv
*priv
,
8232 struct netlink_ext_ack
*extack
)
8234 struct netdev_notifier_changeupper_info changeupper_info
= {
8239 .upper_dev
= upper_dev
,
8242 .upper_info
= upper_info
,
8244 struct net_device
*master_dev
;
8249 if (dev
== upper_dev
)
8252 /* To prevent loops, check if dev is not upper device to upper_dev. */
8253 if (__netdev_has_upper_dev(upper_dev
, dev
))
8256 if ((dev
->lower_level
+ upper_dev
->upper_level
) > MAX_NEST_DEV
)
8260 if (__netdev_has_upper_dev(dev
, upper_dev
))
8263 master_dev
= __netdev_master_upper_dev_get(dev
);
8265 return master_dev
== upper_dev
? -EEXIST
: -EBUSY
;
8268 ret
= call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
8269 &changeupper_info
.info
);
8270 ret
= notifier_to_errno(ret
);
8274 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, upper_priv
,
8279 ret
= call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
8280 &changeupper_info
.info
);
8281 ret
= notifier_to_errno(ret
);
8285 __netdev_update_upper_level(dev
, NULL
);
8286 __netdev_walk_all_lower_dev(dev
, __netdev_update_upper_level
, NULL
);
8288 __netdev_update_lower_level(upper_dev
, priv
);
8289 __netdev_walk_all_upper_dev(upper_dev
, __netdev_update_lower_level
,
8295 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
		.data = NULL,
	};

	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, &priv, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
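
/* Illustrative sketch (editor's addition, not part of the original source):
 * a hypothetical aggregation driver stacking a port under its virtual
 * device under RTNL, with the matching netdev_upper_dev_unlink() on
 * teardown. Kept under #if 0.
 */
#if 0
static int foo_add_port(struct net_device *foo_dev, struct net_device *port,
			struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();
	err = netdev_upper_dev_link(port, foo_dev, extack);
	if (err)
		return err;
	/* ... on teardown: netdev_upper_dev_unlink(port, foo_dev); ... */
	return 0;
}
#endif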
8326 * netdev_master_upper_dev_link - Add a master link to the upper device
8328 * @upper_dev: new upper device
8329 * @upper_priv: upper device private
8330 * @upper_info: upper info to be passed down via notifier
8331 * @extack: netlink extended ack
8333 * Adds a link to device which is upper to this one. In this case, only
8334 * one master upper device can be linked, although other non-master devices
8335 * might be linked as well. The caller must hold the RTNL lock.
8336 * On a failure a negative errno code is returned. On success the reference
8337 * counts are adjusted and the function returns zero.
8339 int netdev_master_upper_dev_link(struct net_device
*dev
,
8340 struct net_device
*upper_dev
,
8341 void *upper_priv
, void *upper_info
,
8342 struct netlink_ext_ack
*extack
)
8344 struct netdev_nested_priv priv
= {
8345 .flags
= NESTED_SYNC_IMM
| NESTED_SYNC_TODO
,
8349 return __netdev_upper_dev_link(dev
, upper_dev
, true,
8350 upper_priv
, upper_info
, &priv
, extack
);
8352 EXPORT_SYMBOL(netdev_master_upper_dev_link
);
8354 static void __netdev_upper_dev_unlink(struct net_device
*dev
,
8355 struct net_device
*upper_dev
,
8356 struct netdev_nested_priv
*priv
)
8358 struct netdev_notifier_changeupper_info changeupper_info
= {
8362 .upper_dev
= upper_dev
,
8368 changeupper_info
.master
= netdev_master_upper_dev_get(dev
) == upper_dev
;
8370 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER
,
8371 &changeupper_info
.info
);
8373 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
8375 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER
,
8376 &changeupper_info
.info
);
8378 __netdev_update_upper_level(dev
, NULL
);
8379 __netdev_walk_all_lower_dev(dev
, __netdev_update_upper_level
, NULL
);
8381 __netdev_update_lower_level(upper_dev
, priv
);
8382 __netdev_walk_all_upper_dev(upper_dev
, __netdev_update_lower_level
,
8387 * netdev_upper_dev_unlink - Removes a link to upper device
8389 * @upper_dev: new upper device
8391 * Removes a link to device which is upper to this one. The caller must hold
8394 void netdev_upper_dev_unlink(struct net_device
*dev
,
8395 struct net_device
*upper_dev
)
8397 struct netdev_nested_priv priv
= {
8398 .flags
= NESTED_SYNC_TODO
,
8402 __netdev_upper_dev_unlink(dev
, upper_dev
, &priv
);
8404 EXPORT_SYMBOL(netdev_upper_dev_unlink
);
8406 static void __netdev_adjacent_dev_set(struct net_device
*upper_dev
,
8407 struct net_device
*lower_dev
,
8410 struct netdev_adjacent
*adj
;
8412 adj
= __netdev_find_adj(lower_dev
, &upper_dev
->adj_list
.lower
);
8416 adj
= __netdev_find_adj(upper_dev
, &lower_dev
->adj_list
.upper
);
8421 static void netdev_adjacent_dev_disable(struct net_device
*upper_dev
,
8422 struct net_device
*lower_dev
)
8424 __netdev_adjacent_dev_set(upper_dev
, lower_dev
, true);
8427 static void netdev_adjacent_dev_enable(struct net_device
*upper_dev
,
8428 struct net_device
*lower_dev
)
8430 __netdev_adjacent_dev_set(upper_dev
, lower_dev
, false);
8433 int netdev_adjacent_change_prepare(struct net_device
*old_dev
,
8434 struct net_device
*new_dev
,
8435 struct net_device
*dev
,
8436 struct netlink_ext_ack
*extack
)
8438 struct netdev_nested_priv priv
= {
8447 if (old_dev
&& new_dev
!= old_dev
)
8448 netdev_adjacent_dev_disable(dev
, old_dev
);
8449 err
= __netdev_upper_dev_link(new_dev
, dev
, false, NULL
, NULL
, &priv
,
8452 if (old_dev
&& new_dev
!= old_dev
)
8453 netdev_adjacent_dev_enable(dev
, old_dev
);
8459 EXPORT_SYMBOL(netdev_adjacent_change_prepare
);
8461 void netdev_adjacent_change_commit(struct net_device
*old_dev
,
8462 struct net_device
*new_dev
,
8463 struct net_device
*dev
)
8465 struct netdev_nested_priv priv
= {
8466 .flags
= NESTED_SYNC_IMM
| NESTED_SYNC_TODO
,
8470 if (!new_dev
|| !old_dev
)
8473 if (new_dev
== old_dev
)
8476 netdev_adjacent_dev_enable(dev
, old_dev
);
8477 __netdev_upper_dev_unlink(old_dev
, dev
, &priv
);
8479 EXPORT_SYMBOL(netdev_adjacent_change_commit
);
8481 void netdev_adjacent_change_abort(struct net_device
*old_dev
,
8482 struct net_device
*new_dev
,
8483 struct net_device
*dev
)
8485 struct netdev_nested_priv priv
= {
8493 if (old_dev
&& new_dev
!= old_dev
)
8494 netdev_adjacent_dev_enable(dev
, old_dev
);
8496 __netdev_upper_dev_unlink(new_dev
, dev
, &priv
);
8498 EXPORT_SYMBOL(netdev_adjacent_change_abort
);
8501 * netdev_bonding_info_change - Dispatch event about slave change
8503 * @bonding_info: info to dispatch
8505 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8506 * The caller must hold the RTNL lock.
8508 void netdev_bonding_info_change(struct net_device
*dev
,
8509 struct netdev_bonding_info
*bonding_info
)
8511 struct netdev_notifier_bonding_info info
= {
8515 memcpy(&info
.bonding_info
, bonding_info
,
8516 sizeof(struct netdev_bonding_info
));
8517 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
,
8520 EXPORT_SYMBOL(netdev_bonding_info_change
);
8522 static int netdev_offload_xstats_enable_l3(struct net_device
*dev
,
8523 struct netlink_ext_ack
*extack
)
8525 struct netdev_notifier_offload_xstats_info info
= {
8527 .info
.extack
= extack
,
8528 .type
= NETDEV_OFFLOAD_XSTATS_TYPE_L3
,
8533 dev
->offload_xstats_l3
= kzalloc(sizeof(*dev
->offload_xstats_l3
),
8535 if (!dev
->offload_xstats_l3
)
8538 rc
= call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE
,
8539 NETDEV_OFFLOAD_XSTATS_DISABLE
,
8541 err
= notifier_to_errno(rc
);
8548 kfree(dev
->offload_xstats_l3
);
8549 dev
->offload_xstats_l3
= NULL
;
8553 int netdev_offload_xstats_enable(struct net_device
*dev
,
8554 enum netdev_offload_xstats_type type
,
8555 struct netlink_ext_ack
*extack
)
8559 if (netdev_offload_xstats_enabled(dev
, type
))
8563 case NETDEV_OFFLOAD_XSTATS_TYPE_L3
:
8564 return netdev_offload_xstats_enable_l3(dev
, extack
);
8570 EXPORT_SYMBOL(netdev_offload_xstats_enable
);
8572 static void netdev_offload_xstats_disable_l3(struct net_device
*dev
)
8574 struct netdev_notifier_offload_xstats_info info
= {
8576 .type
= NETDEV_OFFLOAD_XSTATS_TYPE_L3
,
8579 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE
,
8581 kfree(dev
->offload_xstats_l3
);
8582 dev
->offload_xstats_l3
= NULL
;
8585 int netdev_offload_xstats_disable(struct net_device
*dev
,
8586 enum netdev_offload_xstats_type type
)
8590 if (!netdev_offload_xstats_enabled(dev
, type
))
8594 case NETDEV_OFFLOAD_XSTATS_TYPE_L3
:
8595 netdev_offload_xstats_disable_l3(dev
);
8602 EXPORT_SYMBOL(netdev_offload_xstats_disable
);
8604 static void netdev_offload_xstats_disable_all(struct net_device
*dev
)
8606 netdev_offload_xstats_disable(dev
, NETDEV_OFFLOAD_XSTATS_TYPE_L3
);
8609 static struct rtnl_hw_stats64
*
8610 netdev_offload_xstats_get_ptr(const struct net_device
*dev
,
8611 enum netdev_offload_xstats_type type
)
8614 case NETDEV_OFFLOAD_XSTATS_TYPE_L3
:
8615 return dev
->offload_xstats_l3
;
8622 bool netdev_offload_xstats_enabled(const struct net_device
*dev
,
8623 enum netdev_offload_xstats_type type
)
8627 return netdev_offload_xstats_get_ptr(dev
, type
);
8629 EXPORT_SYMBOL(netdev_offload_xstats_enabled
);
8631 struct netdev_notifier_offload_xstats_ru
{
8635 struct netdev_notifier_offload_xstats_rd
{
8636 struct rtnl_hw_stats64 stats
;
8640 static void netdev_hw_stats64_add(struct rtnl_hw_stats64
*dest
,
8641 const struct rtnl_hw_stats64
*src
)
8643 dest
->rx_packets
+= src
->rx_packets
;
8644 dest
->tx_packets
+= src
->tx_packets
;
8645 dest
->rx_bytes
+= src
->rx_bytes
;
8646 dest
->tx_bytes
+= src
->tx_bytes
;
8647 dest
->rx_errors
+= src
->rx_errors
;
8648 dest
->tx_errors
+= src
->tx_errors
;
8649 dest
->rx_dropped
+= src
->rx_dropped
;
8650 dest
->tx_dropped
+= src
->tx_dropped
;
8651 dest
->multicast
+= src
->multicast
;
8654 static int netdev_offload_xstats_get_used(struct net_device
*dev
,
8655 enum netdev_offload_xstats_type type
,
8657 struct netlink_ext_ack
*extack
)
8659 struct netdev_notifier_offload_xstats_ru report_used
= {};
8660 struct netdev_notifier_offload_xstats_info info
= {
8662 .info
.extack
= extack
,
8664 .report_used
= &report_used
,
8668 WARN_ON(!netdev_offload_xstats_enabled(dev
, type
));
8669 rc
= call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED
,
8671 *p_used
= report_used
.used
;
8672 return notifier_to_errno(rc
);
8675 static int netdev_offload_xstats_get_stats(struct net_device
*dev
,
8676 enum netdev_offload_xstats_type type
,
8677 struct rtnl_hw_stats64
*p_stats
,
8679 struct netlink_ext_ack
*extack
)
8681 struct netdev_notifier_offload_xstats_rd report_delta
= {};
8682 struct netdev_notifier_offload_xstats_info info
= {
8684 .info
.extack
= extack
,
8686 .report_delta
= &report_delta
,
8688 struct rtnl_hw_stats64
*stats
;
8691 stats
= netdev_offload_xstats_get_ptr(dev
, type
);
8692 if (WARN_ON(!stats
))
8695 rc
= call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA
,
8698 /* Cache whatever we got, even if there was an error, otherwise the
8699 * successful stats retrievals would get lost.
8701 netdev_hw_stats64_add(stats
, &report_delta
.stats
);
8705 *p_used
= report_delta
.used
;
8707 return notifier_to_errno(rc
);
8710 int netdev_offload_xstats_get(struct net_device
*dev
,
8711 enum netdev_offload_xstats_type type
,
8712 struct rtnl_hw_stats64
*p_stats
, bool *p_used
,
8713 struct netlink_ext_ack
*extack
)
8718 return netdev_offload_xstats_get_stats(dev
, type
, p_stats
,
8721 return netdev_offload_xstats_get_used(dev
, type
, p_used
,
8724 EXPORT_SYMBOL(netdev_offload_xstats_get
);
8727 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd
*report_delta
,
8728 const struct rtnl_hw_stats64
*stats
)
8730 report_delta
->used
= true;
8731 netdev_hw_stats64_add(&report_delta
->stats
, stats
);
8733 EXPORT_SYMBOL(netdev_offload_xstats_report_delta
);
8736 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru
*report_used
)
8738 report_used
->used
= true;
8740 EXPORT_SYMBOL(netdev_offload_xstats_report_used
);
8742 void netdev_offload_xstats_push_delta(struct net_device
*dev
,
8743 enum netdev_offload_xstats_type type
,
8744 const struct rtnl_hw_stats64
*p_stats
)
8746 struct rtnl_hw_stats64
*stats
;
8750 stats
= netdev_offload_xstats_get_ptr(dev
, type
);
8751 if (WARN_ON(!stats
))
8754 netdev_hw_stats64_add(stats
, p_stats
);
8756 EXPORT_SYMBOL(netdev_offload_xstats_push_delta
);
8759 * netdev_get_xmit_slave - Get the xmit slave of master device
8762 * @all_slaves: assume all the slaves are active
8764 * The reference counters are not incremented so the caller must be
8765 * careful with locks. The caller must hold RCU lock.
8766 * %NULL is returned if no slave is found.
8769 struct net_device
*netdev_get_xmit_slave(struct net_device
*dev
,
8770 struct sk_buff
*skb
,
8773 const struct net_device_ops
*ops
= dev
->netdev_ops
;
8775 if (!ops
->ndo_get_xmit_slave
)
8777 return ops
->ndo_get_xmit_slave(dev
, skb
, all_slaves
);
8779 EXPORT_SYMBOL(netdev_get_xmit_slave
);
8781 static struct net_device
*netdev_sk_get_lower_dev(struct net_device
*dev
,
8784 const struct net_device_ops
*ops
= dev
->netdev_ops
;
8786 if (!ops
->ndo_sk_get_lower_dev
)
8788 return ops
->ndo_sk_get_lower_dev(dev
, sk
);
8792 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8796 * %NULL is returned if no lower device is found.
8799 struct net_device
*netdev_sk_get_lowest_dev(struct net_device
*dev
,
8802 struct net_device
*lower
;
8804 lower
= netdev_sk_get_lower_dev(dev
, sk
);
8807 lower
= netdev_sk_get_lower_dev(dev
, sk
);
8812 EXPORT_SYMBOL(netdev_sk_get_lowest_dev
);
8814 static void netdev_adjacent_add_links(struct net_device
*dev
)
8816 struct netdev_adjacent
*iter
;
8818 struct net
*net
= dev_net(dev
);
8820 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8821 if (!net_eq(net
, dev_net(iter
->dev
)))
8823 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8824 &iter
->dev
->adj_list
.lower
);
8825 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
8826 &dev
->adj_list
.upper
);
8829 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8830 if (!net_eq(net
, dev_net(iter
->dev
)))
8832 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8833 &iter
->dev
->adj_list
.upper
);
8834 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
8835 &dev
->adj_list
.lower
);
8839 static void netdev_adjacent_del_links(struct net_device
*dev
)
8841 struct netdev_adjacent
*iter
;
8843 struct net
*net
= dev_net(dev
);
8845 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8846 if (!net_eq(net
, dev_net(iter
->dev
)))
8848 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
8849 &iter
->dev
->adj_list
.lower
);
8850 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
8851 &dev
->adj_list
.upper
);
8854 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8855 if (!net_eq(net
, dev_net(iter
->dev
)))
8857 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
8858 &iter
->dev
->adj_list
.upper
);
8859 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
8860 &dev
->adj_list
.lower
);
8864 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
8866 struct netdev_adjacent
*iter
;
8868 struct net
*net
= dev_net(dev
);
8870 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8871 if (!net_eq(net
, dev_net(iter
->dev
)))
8873 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
8874 &iter
->dev
->adj_list
.lower
);
8875 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8876 &iter
->dev
->adj_list
.lower
);
8879 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8880 if (!net_eq(net
, dev_net(iter
->dev
)))
8882 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
8883 &iter
->dev
->adj_list
.upper
);
8884 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8885 &iter
->dev
->adj_list
.upper
);
8889 void *netdev_lower_dev_get_private(struct net_device
*dev
,
8890 struct net_device
*lower_dev
)
8892 struct netdev_adjacent
*lower
;
8896 lower
= __netdev_find_adj(lower_dev
, &dev
->adj_list
.lower
);
8900 return lower
->private;
8902 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
8906 * netdev_lower_state_changed - Dispatch event about lower device state change
8907 * @lower_dev: device
8908 * @lower_state_info: state to dispatch
8910 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8911 * The caller must hold the RTNL lock.
8913 void netdev_lower_state_changed(struct net_device
*lower_dev
,
8914 void *lower_state_info
)
8916 struct netdev_notifier_changelowerstate_info changelowerstate_info
= {
8917 .info
.dev
= lower_dev
,
8921 changelowerstate_info
.lower_state_info
= lower_state_info
;
8922 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE
,
8923 &changelowerstate_info
.info
);
8925 EXPORT_SYMBOL(netdev_lower_state_changed
);
8927 static void dev_change_rx_flags(struct net_device
*dev
, int flags
)
8929 const struct net_device_ops
*ops
= dev
->netdev_ops
;
8931 if (ops
->ndo_change_rx_flags
)
8932 ops
->ndo_change_rx_flags(dev
, flags
);
8935 static int __dev_set_promiscuity(struct net_device
*dev
, int inc
, bool notify
)
8937 unsigned int old_flags
= dev
->flags
;
8938 unsigned int promiscuity
, flags
;
8944 promiscuity
= dev
->promiscuity
+ inc
;
8945 if (promiscuity
== 0) {
8948 * If inc causes overflow, untouch promisc and return error.
8950 if (unlikely(inc
> 0)) {
8951 netdev_warn(dev
, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8954 flags
= old_flags
& ~IFF_PROMISC
;
8956 flags
= old_flags
| IFF_PROMISC
;
8958 WRITE_ONCE(dev
->promiscuity
, promiscuity
);
8959 if (flags
!= old_flags
) {
8960 WRITE_ONCE(dev
->flags
, flags
);
8961 netdev_info(dev
, "%s promiscuous mode\n",
8962 dev
->flags
& IFF_PROMISC
? "entered" : "left");
8963 if (audit_enabled
) {
8964 current_uid_gid(&uid
, &gid
);
8965 audit_log(audit_context(), GFP_ATOMIC
,
8966 AUDIT_ANOM_PROMISCUOUS
,
8967 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8968 dev
->name
, (dev
->flags
& IFF_PROMISC
),
8969 (old_flags
& IFF_PROMISC
),
8970 from_kuid(&init_user_ns
, audit_get_loginuid(current
)),
8971 from_kuid(&init_user_ns
, uid
),
8972 from_kgid(&init_user_ns
, gid
),
8973 audit_get_sessionid(current
));
8976 dev_change_rx_flags(dev
, IFF_PROMISC
);
8979 __dev_notify_flags(dev
, old_flags
, IFF_PROMISC
, 0, NULL
);
/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative @inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
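
/* Illustrative sketch (editor's addition, not part of the original source):
 * promiscuity is a counter, so every +1 must eventually be balanced by a -1,
 * typically on the enable/disable path of the feature that needed it. As
 * elsewhere in this file, the caller is expected to hold RTNL. Kept under
 * #if 0.
 */
#if 0
static int foo_start_capture(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* take a promisc reference */
}

static void foo_stop_capture(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* drop our reference */
}
#endif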
9008 static int __dev_set_allmulti(struct net_device
*dev
, int inc
, bool notify
)
9010 unsigned int old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
9011 unsigned int allmulti
, flags
;
9015 allmulti
= dev
->allmulti
+ inc
;
9016 if (allmulti
== 0) {
9019 * If inc causes overflow, untouch allmulti and return error.
9021 if (unlikely(inc
> 0)) {
9022 netdev_warn(dev
, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
9025 flags
= old_flags
& ~IFF_ALLMULTI
;
9027 flags
= old_flags
| IFF_ALLMULTI
;
9029 WRITE_ONCE(dev
->allmulti
, allmulti
);
9030 if (flags
!= old_flags
) {
9031 WRITE_ONCE(dev
->flags
, flags
);
9032 netdev_info(dev
, "%s allmulticast mode\n",
9033 dev
->flags
& IFF_ALLMULTI
? "entered" : "left");
9034 dev_change_rx_flags(dev
, IFF_ALLMULTI
);
9035 dev_set_rx_mode(dev
);
9037 __dev_notify_flags(dev
, old_flags
,
9038 dev
->gflags
^ old_gflags
, 0, NULL
);
/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
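
/* Illustrative sketch (editor's addition, not part of the original source):
 * like promiscuity, allmulti is a counter; a multicast-routing style user
 * takes a reference while it needs every multicast frame and releases it
 * afterwards, with RTNL held by the caller. Kept under #if 0.
 */
#if 0
static int foo_mc_monitor(struct net_device *dev, bool enable)
{
	return dev_set_allmulti(dev, enable ? 1 : -1);
}
#endif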
9063 * Upload unicast and multicast address lists to device and
9064 * configure RX filtering. When the device doesn't support unicast
9065 * filtering it is put in promiscuous mode while unicast addresses
9068 void __dev_set_rx_mode(struct net_device
*dev
)
9070 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9072 /* dev_open will call this function so the list will stay sane. */
9073 if (!(dev
->flags
&IFF_UP
))
9076 if (!netif_device_present(dev
))
9079 if (!(dev
->priv_flags
& IFF_UNICAST_FLT
)) {
9080 /* Unicast addresses changes may only happen under the rtnl,
9081 * therefore calling __dev_set_promiscuity here is safe.
9083 if (!netdev_uc_empty(dev
) && !dev
->uc_promisc
) {
9084 __dev_set_promiscuity(dev
, 1, false);
9085 dev
->uc_promisc
= true;
9086 } else if (netdev_uc_empty(dev
) && dev
->uc_promisc
) {
9087 __dev_set_promiscuity(dev
, -1, false);
9088 dev
->uc_promisc
= false;
9092 if (ops
->ndo_set_rx_mode
)
9093 ops
->ndo_set_rx_mode(dev
);
9096 void dev_set_rx_mode(struct net_device
*dev
)
9098 netif_addr_lock_bh(dev
);
9099 __dev_set_rx_mode(dev
);
9100 netif_addr_unlock_bh(dev
);
9104 * dev_get_flags - get flags reported to userspace
9107 * Get the combination of flag bits exported through APIs to userspace.
9109 unsigned int dev_get_flags(const struct net_device
*dev
)
9113 flags
= (READ_ONCE(dev
->flags
) & ~(IFF_PROMISC
|
9118 (READ_ONCE(dev
->gflags
) & (IFF_PROMISC
|
9121 if (netif_running(dev
)) {
9122 if (netif_oper_up(dev
))
9123 flags
|= IFF_RUNNING
;
9124 if (netif_carrier_ok(dev
))
9125 flags
|= IFF_LOWER_UP
;
9126 if (netif_dormant(dev
))
9127 flags
|= IFF_DORMANT
;
9132 EXPORT_SYMBOL(dev_get_flags
);
9134 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
,
9135 struct netlink_ext_ack
*extack
)
9137 unsigned int old_flags
= dev
->flags
;
9143 * Set the flags on our device.
9146 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
9147 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
9149 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
9153 * Load in the correct multicast list now the flags have changed.
9156 if ((old_flags
^ flags
) & IFF_MULTICAST
)
9157 dev_change_rx_flags(dev
, IFF_MULTICAST
);
9159 dev_set_rx_mode(dev
);
9162 * Have we downed the interface. We handle IFF_UP ourselves
9163 * according to user attempts to set it, rather than blindly
9168 if ((old_flags
^ flags
) & IFF_UP
) {
9169 if (old_flags
& IFF_UP
)
9172 ret
= __dev_open(dev
, extack
);
9175 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
9176 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
9177 unsigned int old_flags
= dev
->flags
;
9179 dev
->gflags
^= IFF_PROMISC
;
9181 if (__dev_set_promiscuity(dev
, inc
, false) >= 0)
9182 if (dev
->flags
!= old_flags
)
9183 dev_set_rx_mode(dev
);
9186 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
9187 * is important. Some (broken) drivers set IFF_PROMISC, when
9188 * IFF_ALLMULTI is requested not asking us and not reporting.
9190 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
9191 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
9193 dev
->gflags
^= IFF_ALLMULTI
;
9194 __dev_set_allmulti(dev
, inc
, false);
9200 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
,
9201 unsigned int gchanges
, u32 portid
,
9202 const struct nlmsghdr
*nlh
)
9204 unsigned int changes
= dev
->flags
^ old_flags
;
9207 rtmsg_ifinfo(RTM_NEWLINK
, dev
, gchanges
, GFP_ATOMIC
, portid
, nlh
);
9209 if (changes
& IFF_UP
) {
9210 if (dev
->flags
& IFF_UP
)
9211 call_netdevice_notifiers(NETDEV_UP
, dev
);
9213 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
9216 if (dev
->flags
& IFF_UP
&&
9217 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
))) {
9218 struct netdev_notifier_change_info change_info
= {
9222 .flags_changed
= changes
,
9225 call_netdevice_notifiers_info(NETDEV_CHANGE
, &change_info
.info
);
9230 * dev_change_flags - change device settings
9232 * @flags: device state flags
9233 * @extack: netlink extended ack
9235 * Change settings on device based state flags. The flags are
9236 * in the userspace exported format.
9238 int dev_change_flags(struct net_device
*dev
, unsigned int flags
,
9239 struct netlink_ext_ack
*extack
)
9242 unsigned int changes
, old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
9244 ret
= __dev_change_flags(dev
, flags
, extack
);
9248 changes
= (old_flags
^ dev
->flags
) | (old_gflags
^ dev
->gflags
);
9249 __dev_notify_flags(dev
, old_flags
, changes
, 0, NULL
);
9252 EXPORT_SYMBOL(dev_change_flags
);
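
/* Illustrative sketch (editor's addition, not part of the original source):
 * bringing an interface administratively up via the userspace-format flags,
 * the same way the SIOCSIFFLAGS path does, with RTNL held by the caller.
 * Kept under #if 0.
 */
#if 0
static int foo_bring_up(struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev) | IFF_UP;

	return dev_change_flags(dev, flags, NULL);
}
#endif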
9254 int __dev_set_mtu(struct net_device
*dev
, int new_mtu
)
9256 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9258 if (ops
->ndo_change_mtu
)
9259 return ops
->ndo_change_mtu(dev
, new_mtu
);
9261 /* Pairs with all the lockless reads of dev->mtu in the stack */
9262 WRITE_ONCE(dev
->mtu
, new_mtu
);
9265 EXPORT_SYMBOL(__dev_set_mtu
);
int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
{
	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}
	return 0;
}
9284 * dev_set_mtu_ext - Change maximum transfer unit
9286 * @new_mtu: new transfer unit
9287 * @extack: netlink extended ack
9289 * Change the maximum transfer size of the network device.
9291 int dev_set_mtu_ext(struct net_device
*dev
, int new_mtu
,
9292 struct netlink_ext_ack
*extack
)
9296 if (new_mtu
== dev
->mtu
)
9299 err
= dev_validate_mtu(dev
, new_mtu
, extack
);
9303 if (!netif_device_present(dev
))
9306 err
= call_netdevice_notifiers(NETDEV_PRECHANGEMTU
, dev
);
9307 err
= notifier_to_errno(err
);
9311 orig_mtu
= dev
->mtu
;
9312 err
= __dev_set_mtu(dev
, new_mtu
);
9315 err
= call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU
, dev
,
9317 err
= notifier_to_errno(err
);
9319 /* setting mtu back and notifying everyone again,
9320 * so that they have a chance to revert changes.
9322 __dev_set_mtu(dev
, orig_mtu
);
9323 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU
, dev
,
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
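
/* Illustrative sketch (editor's addition, not part of the original source):
 * an in-kernel user changing the MTU; range validation against
 * dev->min_mtu/dev->max_mtu and the notifier round-trip all happen inside
 * dev_set_mtu_ext(), with RTNL held by the caller. The 9000-byte value is
 * just an example. Kept under #if 0.
 */
#if 0
static int foo_set_jumbo(struct net_device *dev)
{
	return dev_set_mtu(dev, 9000);
}
#endif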
9344 * dev_change_tx_queue_len - Change TX queue length of a netdevice
9346 * @new_len: new tx queue length
9348 int dev_change_tx_queue_len(struct net_device
*dev
, unsigned long new_len
)
9350 unsigned int orig_len
= dev
->tx_queue_len
;
9353 if (new_len
!= (unsigned int)new_len
)
9356 if (new_len
!= orig_len
) {
9357 WRITE_ONCE(dev
->tx_queue_len
, new_len
);
9358 res
= call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN
, dev
);
9359 res
= notifier_to_errno(res
);
9362 res
= dev_qdisc_change_tx_queue_len(dev
);
9370 netdev_err(dev
, "refused to change device tx_queue_len\n");
9371 WRITE_ONCE(dev
->tx_queue_len
, orig_len
);
9376 * dev_set_group - Change group this device belongs to
9378 * @new_group: group this device should belong to
9380 void dev_set_group(struct net_device
*dev
, int new_group
)
9382 dev
->group
= new_group
;
/**
 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 * @dev: device
 * @addr: new address
 * @extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{
	struct netdev_notifier_pre_changeaddr_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.dev_addr = addr,
	};
	int rc;

	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
	return notifier_to_errno(rc);
}
EXPORT_SYMBOL(dev_pre_changeaddr_notify);
/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 * @extack: netlink extended ack
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
	if (err)
		return err;
	if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
		err = ops->ndo_set_mac_address(dev, sa);
		if (err)
			return err;
	}
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
DECLARE_RWSEM(dev_addr_sem);

int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack)
{
	int ret;

	down_write(&dev_addr_sem);
	ret = dev_set_mac_address(dev, sa, extack);
	up_write(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_set_mac_address_user);
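/* dev_addr_sem serializes hardware-address updates against sockaddr-based
 * readers: dev_set_mac_address_user() takes it for writing, while
 * dev_get_mac_address() below takes it for reading, so a concurrent reader
 * never observes a half-copied dev_addr.
 */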
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
	size_t size = sizeof(sa->sa_data_min);
	struct net_device *dev;
	int ret = 0;

	down_read(&dev_addr_sem);
	rcu_read_lock();

	dev = dev_get_by_name_rcu(net, dev_name);
	if (!dev) {
		ret = -ENODEV;
		goto unlock;
	}
	if (!dev->addr_len)
		memset(sa->sa_data, 0, size);
	else
		memcpy(sa->sa_data, dev->dev_addr,
		       min_t(size_t, size, dev->addr_len));
	sa->sa_family = dev->type;

unlock:
	rcu_read_unlock();
	up_read(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_get_mac_address);
/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (ops->ndo_get_phys_port_name) {
		err = ops->ndo_get_phys_port_name(dev, name, len);
		if (err != -EOPNOTSUPP)
			return err;
	}
	return devlink_compat_phys_port_name_get(dev, name, len);
}
/**
 * dev_get_port_parent_id - Get the device's port parent identifier
 * @dev: network device
 * @ppid: pointer to a storage for the port's parent identifier
 * @recurse: allow/disallow recursion to lower devices
 *
 * Get the device's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netdev_phys_item_id first = { };
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	if (ops->ndo_get_port_parent_id) {
		err = ops->ndo_get_port_parent_id(dev, ppid);
		if (err != -EOPNOTSUPP)
			return err;
	}

	err = devlink_compat_switch_id_get(dev, ppid);
	if (!recurse || err != -EOPNOTSUPP)
		return err;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = dev_get_port_parent_id(lower_dev, ppid, true);
		if (err)
			break;
		if (!first.id_len)
			first = *ppid;
		else if (memcmp(&first, ppid, sizeof(*ppid)))
			return -EOPNOTSUPP;
	}

	return err;
}
EXPORT_SYMBOL(dev_get_port_parent_id);
/**
 * netdev_port_same_parent_id - Indicate if two network devices have
 * the same port parent identifier
 * @a: first network device
 * @b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;

	return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
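/* Descriptive note: this is typically used by switchdev-aware code. Two
 * netdevs that report the same port parent ID are presumed to be ports of
 * the same physical switch, so offload logic can treat them as belonging to
 * one hardware domain. This note is explanatory and not tied to a specific
 * caller in this file.
 */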
/**
 * dev_change_proto_down - set carrier according to proto_down.
 * @dev: device
 * @proto_down: new value
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	if (!dev->change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	if (proto_down)
		netif_carrier_off(dev);
	else
		netif_carrier_on(dev);
	WRITE_ONCE(dev->proto_down, proto_down);
	return 0;
}
/**
 * dev_change_proto_down_reason - proto down reason
 * @dev: device
 * @mask: proto down mask
 * @value: proto down value
 */
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value)
{
	u32 proto_down_reason;
	int b;

	if (!mask) {
		proto_down_reason = value;
	} else {
		proto_down_reason = dev->proto_down_reason;
		for_each_set_bit(b, &mask, 32) {
			if (value & (1 << b))
				proto_down_reason |= BIT(b);
			else
				proto_down_reason &= ~BIT(b);
		}
	}
	WRITE_ONCE(dev->proto_down_reason, proto_down_reason);
}
struct bpf_xdp_link {
	struct bpf_link link;
	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
	int flags;
};

static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{
	if (flags & XDP_FLAGS_HW_MODE)
		return XDP_MODE_HW;
	if (flags & XDP_FLAGS_DRV_MODE)
		return XDP_MODE_DRV;
	if (flags & XDP_FLAGS_SKB_MODE)
		return XDP_MODE_SKB;
	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}
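/* Mode resolution above is strictly by flag precedence:
 *   XDP_FLAGS_HW_MODE  -> XDP_MODE_HW
 *   XDP_FLAGS_DRV_MODE -> XDP_MODE_DRV
 *   XDP_FLAGS_SKB_MODE -> XDP_MODE_SKB
 * With no mode flag set, a driver that implements ndo_bpf gets native (DRV)
 * mode, and everything else falls back to generic (SKB) mode.
 */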
static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
{
	switch (mode) {
	case XDP_MODE_SKB:
		return generic_xdp_install;
	case XDP_MODE_DRV:
	case XDP_MODE_HW:
		return dev->netdev_ops->ndo_bpf;
	default:
		return NULL;
	}
}
static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
					 enum bpf_xdp_mode mode)
{
	return dev->xdp_state[mode].link;
}

static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
				     enum bpf_xdp_mode mode)
{
	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);

	if (link)
		return link->link.prog;
	return dev->xdp_state[mode].prog;
}
u8 dev_xdp_prog_count(struct net_device *dev)
{
	u8 count = 0;
	int i;

	for (i = 0; i < __MAX_XDP_MODE; i++)
		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
			count++;
	return count;
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);

u8 dev_xdp_sb_prog_count(struct net_device *dev)
{
	u8 count = 0;
	int i;

	for (i = 0; i < __MAX_XDP_MODE; i++)
		if (dev->xdp_state[i].prog &&
		    !dev->xdp_state[i].prog->aux->xdp_has_frags)
			count++;
	return count;
}
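/* dev_xdp_sb_prog_count() counts only single-buffer programs, i.e. attached
 * programs whose aux->xdp_has_frags is false. Presumably this lets callers
 * detect configurations that cannot handle multi-buffer (fragmented) XDP
 * frames before enabling features that would produce them.
 */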
int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
{
	if (!dev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;

	if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    bpf->command == XDP_SETUP_PROG &&
	    bpf->prog && !bpf->prog->aux->xdp_has_frags) {
		NL_SET_ERR_MSG(bpf->extack,
			       "unable to propagate XDP to device using tcp-data-split");
		return -EBUSY;
	}

	if (dev_get_min_mp_channel_count(dev)) {
		NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");
		return -EBUSY;
	}

	return dev->netdev_ops->ndo_bpf(dev, bpf);
}
EXPORT_SYMBOL_GPL(dev_xdp_propagate);
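/* dev_xdp_propagate() forwards an already-built netdev_bpf request to the
 * driver, re-checking the same tcp-data-split and memory-provider
 * constraints as dev_xdp_install(). It is exported for callers outside this
 * file that need to re-issue an XDP setup command to the driver; the exact
 * call sites are not shown here.
 */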
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
	struct bpf_prog *prog = dev_xdp_prog(dev, mode);

	return prog ? prog->aux->id : 0;
}

static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_xdp_link *link)
{
	dev->xdp_state[mode].link = link;
	dev->xdp_state[mode].prog = NULL;
}

static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_prog *prog)
{
	dev->xdp_state[mode].link = NULL;
	dev->xdp_state[mode].prog = prog;
}
static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
			   u32 flags, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	int err;

	if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    prog && !prog->aux->xdp_has_frags) {
		NL_SET_ERR_MSG(extack, "unable to install XDP to device using tcp-data-split");
		return -EBUSY;
	}

	if (dev_get_min_mp_channel_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
		return -EBUSY;
	}

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	/* Drivers assume refcnt is already incremented (i.e, prog pointer is
	 * "moved" into driver), so they don't increment it on their own, but
	 * they do decrement refcnt when program is detached or replaced.
	 * Given net_device also owns link/prog, we need to bump refcnt here
	 * to prevent drivers from underflowing it.
	 */
	if (prog)
		bpf_prog_inc(prog);
	err = bpf_op(dev, &xdp);
	if (err) {
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	if (mode != XDP_MODE_HW)
		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);

	return 0;
}
static void dev_xdp_uninstall(struct net_device *dev)
{
	struct bpf_xdp_link *link;
	struct bpf_prog *prog;
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;

	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
		prog = dev_xdp_prog(dev, mode);
		if (!prog)
			continue;

		bpf_op = dev_xdp_bpf_op(dev, mode);
		if (!bpf_op)
			continue;

		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));

		/* auto-detach link from net device */
		link = dev_xdp_link(dev, mode);
		if (link)
			link->dev = NULL;
		else
			bpf_prog_put(prog);

		dev_xdp_set_link(dev, mode, NULL);
	}
}
9837 static int dev_xdp_attach(struct net_device
*dev
, struct netlink_ext_ack
*extack
,
9838 struct bpf_xdp_link
*link
, struct bpf_prog
*new_prog
,
9839 struct bpf_prog
*old_prog
, u32 flags
)
9841 unsigned int num_modes
= hweight32(flags
& XDP_FLAGS_MODES
);
9842 struct bpf_prog
*cur_prog
;
9843 struct net_device
*upper
;
9844 struct list_head
*iter
;
9845 enum bpf_xdp_mode mode
;
9851 /* either link or prog attachment, never both */
9852 if (link
&& (new_prog
|| old_prog
))
9854 /* link supports only XDP mode flags */
9855 if (link
&& (flags
& ~XDP_FLAGS_MODES
)) {
9856 NL_SET_ERR_MSG(extack
, "Invalid XDP flags for BPF link attachment");
9859 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9860 if (num_modes
> 1) {
9861 NL_SET_ERR_MSG(extack
, "Only one XDP mode flag can be set");
9864 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9865 if (!num_modes
&& dev_xdp_prog_count(dev
) > 1) {
9866 NL_SET_ERR_MSG(extack
,
9867 "More than one program loaded, unset mode is ambiguous");
9870 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9871 if (old_prog
&& !(flags
& XDP_FLAGS_REPLACE
)) {
9872 NL_SET_ERR_MSG(extack
, "XDP_FLAGS_REPLACE is not specified");
9876 mode
= dev_xdp_mode(dev
, flags
);
9877 /* can't replace attached link */
9878 if (dev_xdp_link(dev
, mode
)) {
9879 NL_SET_ERR_MSG(extack
, "Can't replace active BPF XDP link");
9883 /* don't allow if an upper device already has a program */
9884 netdev_for_each_upper_dev_rcu(dev
, upper
, iter
) {
9885 if (dev_xdp_prog_count(upper
) > 0) {
9886 NL_SET_ERR_MSG(extack
, "Cannot attach when an upper device already has a program");
9891 cur_prog
= dev_xdp_prog(dev
, mode
);
9892 /* can't replace attached prog with link */
9893 if (link
&& cur_prog
) {
9894 NL_SET_ERR_MSG(extack
, "Can't replace active XDP program with BPF link");
9897 if ((flags
& XDP_FLAGS_REPLACE
) && cur_prog
!= old_prog
) {
9898 NL_SET_ERR_MSG(extack
, "Active program does not match expected");
9902 /* put effective new program into new_prog */
9904 new_prog
= link
->link
.prog
;
9907 bool offload
= mode
== XDP_MODE_HW
;
9908 enum bpf_xdp_mode other_mode
= mode
== XDP_MODE_SKB
9909 ? XDP_MODE_DRV
: XDP_MODE_SKB
;
9911 if ((flags
& XDP_FLAGS_UPDATE_IF_NOEXIST
) && cur_prog
) {
9912 NL_SET_ERR_MSG(extack
, "XDP program already attached");
9915 if (!offload
&& dev_xdp_prog(dev
, other_mode
)) {
9916 NL_SET_ERR_MSG(extack
, "Native and generic XDP can't be active at the same time");
9919 if (!offload
&& bpf_prog_is_offloaded(new_prog
->aux
)) {
9920 NL_SET_ERR_MSG(extack
, "Using offloaded program without HW_MODE flag is not supported");
9923 if (bpf_prog_is_dev_bound(new_prog
->aux
) && !bpf_offload_dev_match(new_prog
, dev
)) {
9924 NL_SET_ERR_MSG(extack
, "Program bound to different device");
9927 if (new_prog
->expected_attach_type
== BPF_XDP_DEVMAP
) {
9928 NL_SET_ERR_MSG(extack
, "BPF_XDP_DEVMAP programs can not be attached to a device");
9931 if (new_prog
->expected_attach_type
== BPF_XDP_CPUMAP
) {
9932 NL_SET_ERR_MSG(extack
, "BPF_XDP_CPUMAP programs can not be attached to a device");
9937 /* don't call drivers if the effective program didn't change */
9938 if (new_prog
!= cur_prog
) {
9939 bpf_op
= dev_xdp_bpf_op(dev
, mode
);
9941 NL_SET_ERR_MSG(extack
, "Underlying driver does not support XDP in native mode");
9945 err
= dev_xdp_install(dev
, mode
, bpf_op
, extack
, flags
, new_prog
);
9951 dev_xdp_set_link(dev
, mode
, link
);
9953 dev_xdp_set_prog(dev
, mode
, new_prog
);
9955 bpf_prog_put(cur_prog
);
9960 static int dev_xdp_attach_link(struct net_device
*dev
,
9961 struct netlink_ext_ack
*extack
,
9962 struct bpf_xdp_link
*link
)
9964 return dev_xdp_attach(dev
, extack
, link
, NULL
, NULL
, link
->flags
);
9967 static int dev_xdp_detach_link(struct net_device
*dev
,
9968 struct netlink_ext_ack
*extack
,
9969 struct bpf_xdp_link
*link
)
9971 enum bpf_xdp_mode mode
;
9976 mode
= dev_xdp_mode(dev
, link
->flags
);
9977 if (dev_xdp_link(dev
, mode
) != link
)
9980 bpf_op
= dev_xdp_bpf_op(dev
, mode
);
9981 WARN_ON(dev_xdp_install(dev
, mode
, bpf_op
, NULL
, 0, NULL
));
9982 dev_xdp_set_link(dev
, mode
, NULL
);
9986 static void bpf_xdp_link_release(struct bpf_link
*link
)
9988 struct bpf_xdp_link
*xdp_link
= container_of(link
, struct bpf_xdp_link
, link
);
9992 /* if racing with net_device's tear down, xdp_link->dev might be
9993 * already NULL, in which case link was already auto-detached
9995 if (xdp_link
->dev
) {
9996 WARN_ON(dev_xdp_detach_link(xdp_link
->dev
, NULL
, xdp_link
));
9997 xdp_link
->dev
= NULL
;
10003 static int bpf_xdp_link_detach(struct bpf_link
*link
)
10005 bpf_xdp_link_release(link
);
10009 static void bpf_xdp_link_dealloc(struct bpf_link
*link
)
10011 struct bpf_xdp_link
*xdp_link
= container_of(link
, struct bpf_xdp_link
, link
);
10016 static void bpf_xdp_link_show_fdinfo(const struct bpf_link
*link
,
10017 struct seq_file
*seq
)
10019 struct bpf_xdp_link
*xdp_link
= container_of(link
, struct bpf_xdp_link
, link
);
10024 ifindex
= xdp_link
->dev
->ifindex
;
10027 seq_printf(seq
, "ifindex:\t%u\n", ifindex
);
10030 static int bpf_xdp_link_fill_link_info(const struct bpf_link
*link
,
10031 struct bpf_link_info
*info
)
10033 struct bpf_xdp_link
*xdp_link
= container_of(link
, struct bpf_xdp_link
, link
);
10038 ifindex
= xdp_link
->dev
->ifindex
;
10041 info
->xdp
.ifindex
= ifindex
;
10045 static int bpf_xdp_link_update(struct bpf_link
*link
, struct bpf_prog
*new_prog
,
10046 struct bpf_prog
*old_prog
)
10048 struct bpf_xdp_link
*xdp_link
= container_of(link
, struct bpf_xdp_link
, link
);
10049 enum bpf_xdp_mode mode
;
10055 /* link might have been auto-released already, so fail */
10056 if (!xdp_link
->dev
) {
10061 if (old_prog
&& link
->prog
!= old_prog
) {
10065 old_prog
= link
->prog
;
10066 if (old_prog
->type
!= new_prog
->type
||
10067 old_prog
->expected_attach_type
!= new_prog
->expected_attach_type
) {
10072 if (old_prog
== new_prog
) {
10073 /* no-op, don't disturb drivers */
10074 bpf_prog_put(new_prog
);
10078 mode
= dev_xdp_mode(xdp_link
->dev
, xdp_link
->flags
);
10079 bpf_op
= dev_xdp_bpf_op(xdp_link
->dev
, mode
);
10080 err
= dev_xdp_install(xdp_link
->dev
, mode
, bpf_op
, NULL
,
10081 xdp_link
->flags
, new_prog
);
10085 old_prog
= xchg(&link
->prog
, new_prog
);
10086 bpf_prog_put(old_prog
);
10093 static const struct bpf_link_ops bpf_xdp_link_lops
= {
10094 .release
= bpf_xdp_link_release
,
10095 .dealloc
= bpf_xdp_link_dealloc
,
10096 .detach
= bpf_xdp_link_detach
,
10097 .show_fdinfo
= bpf_xdp_link_show_fdinfo
,
10098 .fill_link_info
= bpf_xdp_link_fill_link_info
,
10099 .update_prog
= bpf_xdp_link_update
,
10102 int bpf_xdp_link_attach(const union bpf_attr
*attr
, struct bpf_prog
*prog
)
10104 struct net
*net
= current
->nsproxy
->net_ns
;
10105 struct bpf_link_primer link_primer
;
10106 struct netlink_ext_ack extack
= {};
10107 struct bpf_xdp_link
*link
;
10108 struct net_device
*dev
;
10112 dev
= dev_get_by_index(net
, attr
->link_create
.target_ifindex
);
10118 link
= kzalloc(sizeof(*link
), GFP_USER
);
10124 bpf_link_init(&link
->link
, BPF_LINK_TYPE_XDP
, &bpf_xdp_link_lops
, prog
);
10126 link
->flags
= attr
->link_create
.flags
;
10128 err
= bpf_link_prime(&link
->link
, &link_primer
);
10134 err
= dev_xdp_attach_link(dev
, &extack
, link
);
10139 bpf_link_cleanup(&link_primer
);
10140 trace_bpf_xdp_link_attach_failed(extack
._msg
);
10144 fd
= bpf_link_settle(&link_primer
);
10145 /* link itself doesn't hold dev's refcnt to not complicate shutdown */
10158 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
10160 * @extack: netlink extended ack
10161 * @fd: new program fd or negative value to clear
10162 * @expected_fd: old program fd that userspace expects to replace or clear
10163 * @flags: xdp-related flags
10165 * Set or clear a bpf program for a device
10167 int dev_change_xdp_fd(struct net_device
*dev
, struct netlink_ext_ack
*extack
,
10168 int fd
, int expected_fd
, u32 flags
)
10170 enum bpf_xdp_mode mode
= dev_xdp_mode(dev
, flags
);
10171 struct bpf_prog
*new_prog
= NULL
, *old_prog
= NULL
;
10177 new_prog
= bpf_prog_get_type_dev(fd
, BPF_PROG_TYPE_XDP
,
10178 mode
!= XDP_MODE_SKB
);
10179 if (IS_ERR(new_prog
))
10180 return PTR_ERR(new_prog
);
10183 if (expected_fd
>= 0) {
10184 old_prog
= bpf_prog_get_type_dev(expected_fd
, BPF_PROG_TYPE_XDP
,
10185 mode
!= XDP_MODE_SKB
);
10186 if (IS_ERR(old_prog
)) {
10187 err
= PTR_ERR(old_prog
);
10193 err
= dev_xdp_attach(dev
, extack
, NULL
, new_prog
, old_prog
, flags
);
10196 if (err
&& new_prog
)
10197 bpf_prog_put(new_prog
);
10199 bpf_prog_put(old_prog
);
u32 dev_get_min_mp_channel_count(const struct net_device *dev)
{
	int i;

	for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
		if (dev->_rx[i].mp_params.mp_priv)
			/* The channel count is the idx plus 1. */
			return i + 1;

	return 0;
}
/**
 * dev_index_reserve() - allocate an ifindex in a namespace
 * @net: the applicable net namespace
 * @ifindex: requested ifindex, pass %0 to get one allocated
 *
 * Allocate an ifindex for a new device. Caller must either use the ifindex
 * to store the device (via list_netdevice()) or call dev_index_release()
 * to give the index up.
 *
 * Return: a suitable unique value for a new device interface number or -errno.
 */
static int dev_index_reserve(struct net *net, u32 ifindex)
{
	int err;

	if (ifindex > INT_MAX) {
		DEBUG_NET_WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (!ifindex)
		err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
				      xa_limit_31b, &net->ifindex, GFP_KERNEL);
	else
		err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
	if (err < 0)
		return err;

	return ifindex;
}

static void dev_index_release(struct net *net, int ifindex)
{
	/* Expect only unused indexes, unlist_netdevice() removes the used */
	WARN_ON(xa_erase(&net->dev_by_index, ifindex));
}
static bool from_cleanup_net(void)
{
#ifdef CONFIG_NET_NS
	return current == cleanup_net_task;
#else
	return false;
#endif
}

static void rtnl_drop_if_cleanup_net(void)
{
	if (from_cleanup_net())
		__rtnl_unlock();
}

static void rtnl_acquire_if_cleanup_net(void)
{
	if (from_cleanup_net())
		rtnl_lock();
}

/* Delayed registration/unregistration */
LIST_HEAD(net_todo_list);
static LIST_HEAD(net_todo_list_for_cleanup_net);

/* TODO: net_todo_list/net_todo_list_for_cleanup_net should probably
 * be provided by callers, instead of being static, rtnl protected.
 */
static struct list_head *todo_list(void)
{
	return from_cleanup_net() ? &net_todo_list_for_cleanup_net :
				    &net_todo_list;
}

DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
atomic_t dev_unreg_count = ATOMIC_INIT(0);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, todo_list());
}
10296 static netdev_features_t
netdev_sync_upper_features(struct net_device
*lower
,
10297 struct net_device
*upper
, netdev_features_t features
)
10299 netdev_features_t upper_disables
= NETIF_F_UPPER_DISABLES
;
10300 netdev_features_t feature
;
10303 for_each_netdev_feature(upper_disables
, feature_bit
) {
10304 feature
= __NETIF_F_BIT(feature_bit
);
10305 if (!(upper
->wanted_features
& feature
)
10306 && (features
& feature
)) {
10307 netdev_dbg(lower
, "Dropping feature %pNF, upper dev %s has it off.\n",
10308 &feature
, upper
->name
);
10309 features
&= ~feature
;
10316 static void netdev_sync_lower_features(struct net_device
*upper
,
10317 struct net_device
*lower
, netdev_features_t features
)
10319 netdev_features_t upper_disables
= NETIF_F_UPPER_DISABLES
;
10320 netdev_features_t feature
;
10323 for_each_netdev_feature(upper_disables
, feature_bit
) {
10324 feature
= __NETIF_F_BIT(feature_bit
);
10325 if (!(features
& feature
) && (lower
->features
& feature
)) {
10326 netdev_dbg(upper
, "Disabling feature %pNF on lower dev %s.\n",
10327 &feature
, lower
->name
);
10328 lower
->wanted_features
&= ~feature
;
10329 __netdev_update_features(lower
);
10331 if (unlikely(lower
->features
& feature
))
10332 netdev_WARN(upper
, "failed to disable %pNF on %s!\n",
10333 &feature
, lower
->name
);
10335 netdev_features_change(lower
);
static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
{
	netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
	bool hw_csum = features & NETIF_F_HW_CSUM;

	return ip_csum || hw_csum;
}
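/* netdev_has_ip_or_hw_csum() captures the "can compute L4 checksums"
 * prerequisite in one place: either both NETIF_F_IP_CSUM and
 * NETIF_F_IPV6_CSUM, or NETIF_F_HW_CSUM. netdev_fix_features() below uses
 * it to gate NETIF_F_HW_TLS_TX and NETIF_F_GSO_UDP_L4.
 */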
10349 static netdev_features_t
netdev_fix_features(struct net_device
*dev
,
10350 netdev_features_t features
)
10352 /* Fix illegal checksum combinations */
10353 if ((features
& NETIF_F_HW_CSUM
) &&
10354 (features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
10355 netdev_warn(dev
, "mixed HW and IP checksum settings.\n");
10356 features
&= ~(NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
);
10359 /* TSO requires that SG is present as well. */
10360 if ((features
& NETIF_F_ALL_TSO
) && !(features
& NETIF_F_SG
)) {
10361 netdev_dbg(dev
, "Dropping TSO features since no SG feature.\n");
10362 features
&= ~NETIF_F_ALL_TSO
;
10365 if ((features
& NETIF_F_TSO
) && !(features
& NETIF_F_HW_CSUM
) &&
10366 !(features
& NETIF_F_IP_CSUM
)) {
10367 netdev_dbg(dev
, "Dropping TSO features since no CSUM feature.\n");
10368 features
&= ~NETIF_F_TSO
;
10369 features
&= ~NETIF_F_TSO_ECN
;
10372 if ((features
& NETIF_F_TSO6
) && !(features
& NETIF_F_HW_CSUM
) &&
10373 !(features
& NETIF_F_IPV6_CSUM
)) {
10374 netdev_dbg(dev
, "Dropping TSO6 features since no CSUM feature.\n");
10375 features
&= ~NETIF_F_TSO6
;
10378 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
10379 if ((features
& NETIF_F_TSO_MANGLEID
) && !(features
& NETIF_F_TSO
))
10380 features
&= ~NETIF_F_TSO_MANGLEID
;
10382 /* TSO ECN requires that TSO is present as well. */
10383 if ((features
& NETIF_F_ALL_TSO
) == NETIF_F_TSO_ECN
)
10384 features
&= ~NETIF_F_TSO_ECN
;
10386 /* Software GSO depends on SG. */
10387 if ((features
& NETIF_F_GSO
) && !(features
& NETIF_F_SG
)) {
10388 netdev_dbg(dev
, "Dropping NETIF_F_GSO since no SG feature.\n");
10389 features
&= ~NETIF_F_GSO
;
10392 /* GSO partial features require GSO partial be set */
10393 if ((features
& dev
->gso_partial_features
) &&
10394 !(features
& NETIF_F_GSO_PARTIAL
)) {
10396 "Dropping partially supported GSO features since no GSO partial.\n");
10397 features
&= ~dev
->gso_partial_features
;
10400 if (!(features
& NETIF_F_RXCSUM
)) {
10401 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
10402 * successfully merged by hardware must also have the
10403 * checksum verified by hardware. If the user does not
10404 * want to enable RXCSUM, logically, we should disable GRO_HW.
10406 if (features
& NETIF_F_GRO_HW
) {
10407 netdev_dbg(dev
, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
10408 features
&= ~NETIF_F_GRO_HW
;
10412 /* LRO/HW-GRO features cannot be combined with RX-FCS */
10413 if (features
& NETIF_F_RXFCS
) {
10414 if (features
& NETIF_F_LRO
) {
10415 netdev_dbg(dev
, "Dropping LRO feature since RX-FCS is requested.\n");
10416 features
&= ~NETIF_F_LRO
;
10419 if (features
& NETIF_F_GRO_HW
) {
10420 netdev_dbg(dev
, "Dropping HW-GRO feature since RX-FCS is requested.\n");
10421 features
&= ~NETIF_F_GRO_HW
;
10425 if ((features
& NETIF_F_GRO_HW
) && (features
& NETIF_F_LRO
)) {
10426 netdev_dbg(dev
, "Dropping LRO feature since HW-GRO is requested.\n");
10427 features
&= ~NETIF_F_LRO
;
10430 if ((features
& NETIF_F_HW_TLS_TX
) && !netdev_has_ip_or_hw_csum(features
)) {
10431 netdev_dbg(dev
, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
10432 features
&= ~NETIF_F_HW_TLS_TX
;
10435 if ((features
& NETIF_F_HW_TLS_RX
) && !(features
& NETIF_F_RXCSUM
)) {
10436 netdev_dbg(dev
, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
10437 features
&= ~NETIF_F_HW_TLS_RX
;
10440 if ((features
& NETIF_F_GSO_UDP_L4
) && !netdev_has_ip_or_hw_csum(features
)) {
10441 netdev_dbg(dev
, "Dropping USO feature since no CSUM feature.\n");
10442 features
&= ~NETIF_F_GSO_UDP_L4
;
10448 int __netdev_update_features(struct net_device
*dev
)
10450 struct net_device
*upper
, *lower
;
10451 netdev_features_t features
;
10452 struct list_head
*iter
;
10457 features
= netdev_get_wanted_features(dev
);
10459 if (dev
->netdev_ops
->ndo_fix_features
)
10460 features
= dev
->netdev_ops
->ndo_fix_features(dev
, features
);
10462 /* driver might be less strict about feature dependencies */
10463 features
= netdev_fix_features(dev
, features
);
10465 /* some features can't be enabled if they're off on an upper device */
10466 netdev_for_each_upper_dev_rcu(dev
, upper
, iter
)
10467 features
= netdev_sync_upper_features(dev
, upper
, features
);
10469 if (dev
->features
== features
)
10472 netdev_dbg(dev
, "Features changed: %pNF -> %pNF\n",
10473 &dev
->features
, &features
);
10475 if (dev
->netdev_ops
->ndo_set_features
)
10476 err
= dev
->netdev_ops
->ndo_set_features(dev
, features
);
10480 if (unlikely(err
< 0)) {
10482 "set_features() failed (%d); wanted %pNF, left %pNF\n",
10483 err
, &features
, &dev
->features
);
10484 /* return non-0 since some features might have changed and
10485 * it's better to fire a spurious notification than miss it
10491 /* some features must be disabled on lower devices when disabled
10492 * on an upper device (think: bonding master or bridge)
10494 netdev_for_each_lower_dev(dev
, lower
, iter
)
10495 netdev_sync_lower_features(dev
, lower
, features
);
10498 netdev_features_t diff
= features
^ dev
->features
;
10500 if (diff
& NETIF_F_RX_UDP_TUNNEL_PORT
) {
10501 /* udp_tunnel_{get,drop}_rx_info both need
10502 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
10503 * device, or they won't do anything.
10504 * Thus we need to update dev->features
10505 * *before* calling udp_tunnel_get_rx_info,
10506 * but *after* calling udp_tunnel_drop_rx_info.
10508 if (features
& NETIF_F_RX_UDP_TUNNEL_PORT
) {
10509 dev
->features
= features
;
10510 udp_tunnel_get_rx_info(dev
);
10512 udp_tunnel_drop_rx_info(dev
);
10516 if (diff
& NETIF_F_HW_VLAN_CTAG_FILTER
) {
10517 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
) {
10518 dev
->features
= features
;
10519 err
|= vlan_get_rx_ctag_filter_info(dev
);
10521 vlan_drop_rx_ctag_filter_info(dev
);
10525 if (diff
& NETIF_F_HW_VLAN_STAG_FILTER
) {
10526 if (features
& NETIF_F_HW_VLAN_STAG_FILTER
) {
10527 dev
->features
= features
;
10528 err
|= vlan_get_rx_stag_filter_info(dev
);
10530 vlan_drop_rx_stag_filter_info(dev
);
10534 dev
->features
= features
;
10537 return err
< 0 ? 0 : 1;
/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
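/* Illustrative sketch (not from this file): a driver that toggled one of
 * its hw_features bits, e.g. after firmware reconfiguration, would call
 * netdev_update_features() under RTNL so the core re-runs the fixups above
 * and notifies userspace only if the effective feature set changed;
 * netdev_change_features() is the variant for when dependent feature sets
 * (such as vlan_features) changed and notification must be unconditional.
 * "my_dev" below is a placeholder device.
 *
 *	rtnl_lock();
 *	my_dev->hw_features |= NETIF_F_RXCSUM;
 *	netdev_update_features(my_dev);
 *	rtnl_unlock();
 */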
/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (rootdev->operstate == IF_OPER_TESTING)
		netif_testing_on(dev);
	else
		netif_testing_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
10601 static int netif_alloc_rx_queues(struct net_device
*dev
)
10603 unsigned int i
, count
= dev
->num_rx_queues
;
10604 struct netdev_rx_queue
*rx
;
10605 size_t sz
= count
* sizeof(*rx
);
10610 rx
= kvzalloc(sz
, GFP_KERNEL_ACCOUNT
| __GFP_RETRY_MAYFAIL
);
10616 for (i
= 0; i
< count
; i
++) {
10619 /* XDP RX-queue setup */
10620 err
= xdp_rxq_info_reg(&rx
[i
].xdp_rxq
, dev
, i
, 0);
10627 /* Rollback successful reg's and free other resources */
10629 xdp_rxq_info_unreg(&rx
[i
].xdp_rxq
);
10635 static void netif_free_rx_queues(struct net_device
*dev
)
10637 unsigned int i
, count
= dev
->num_rx_queues
;
10639 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
10643 for (i
= 0; i
< count
; i
++)
10644 xdp_rxq_info_unreg(&dev
->_rx
[i
].xdp_rxq
);
10649 static void netdev_init_one_queue(struct net_device
*dev
,
10650 struct netdev_queue
*queue
, void *_unused
)
10652 /* Initialize queue lock */
10653 spin_lock_init(&queue
->_xmit_lock
);
10654 netdev_set_xmit_lockdep_class(&queue
->_xmit_lock
, dev
->type
);
10655 queue
->xmit_lock_owner
= -1;
10656 netdev_queue_numa_node_write(queue
, NUMA_NO_NODE
);
10659 dql_init(&queue
->dql
, HZ
);
10663 static void netif_free_tx_queues(struct net_device
*dev
)
10668 static int netif_alloc_netdev_queues(struct net_device
*dev
)
10670 unsigned int count
= dev
->num_tx_queues
;
10671 struct netdev_queue
*tx
;
10672 size_t sz
= count
* sizeof(*tx
);
10674 if (count
< 1 || count
> 0xffff)
10677 tx
= kvzalloc(sz
, GFP_KERNEL_ACCOUNT
| __GFP_RETRY_MAYFAIL
);
10683 netdev_for_each_tx_queue(dev
, netdev_init_one_queue
, NULL
);
10684 spin_lock_init(&dev
->tx_global_lock
);
10689 void netif_tx_stop_all_queues(struct net_device
*dev
)
10693 for (i
= 0; i
< dev
->num_tx_queues
; i
++) {
10694 struct netdev_queue
*txq
= netdev_get_tx_queue(dev
, i
);
10696 netif_tx_stop_queue(txq
);
10699 EXPORT_SYMBOL(netif_tx_stop_all_queues
);
10701 static int netdev_do_alloc_pcpu_stats(struct net_device
*dev
)
10705 /* Drivers implementing ndo_get_peer_dev must support tstat
10706 * accounting, so that skb_do_redirect() can bump the dev's
10707 * RX stats upon network namespace switch.
10709 if (dev
->netdev_ops
->ndo_get_peer_dev
&&
10710 dev
->pcpu_stat_type
!= NETDEV_PCPU_STAT_TSTATS
)
10711 return -EOPNOTSUPP
;
10713 switch (dev
->pcpu_stat_type
) {
10714 case NETDEV_PCPU_STAT_NONE
:
10716 case NETDEV_PCPU_STAT_LSTATS
:
10717 v
= dev
->lstats
= netdev_alloc_pcpu_stats(struct pcpu_lstats
);
10719 case NETDEV_PCPU_STAT_TSTATS
:
10720 v
= dev
->tstats
= netdev_alloc_pcpu_stats(struct pcpu_sw_netstats
);
10722 case NETDEV_PCPU_STAT_DSTATS
:
10723 v
= dev
->dstats
= netdev_alloc_pcpu_stats(struct pcpu_dstats
);
10729 return v
? 0 : -ENOMEM
;
10732 static void netdev_do_free_pcpu_stats(struct net_device
*dev
)
10734 switch (dev
->pcpu_stat_type
) {
10735 case NETDEV_PCPU_STAT_NONE
:
10737 case NETDEV_PCPU_STAT_LSTATS
:
10738 free_percpu(dev
->lstats
);
10740 case NETDEV_PCPU_STAT_TSTATS
:
10741 free_percpu(dev
->tstats
);
10743 case NETDEV_PCPU_STAT_DSTATS
:
10744 free_percpu(dev
->dstats
);
10749 static void netdev_free_phy_link_topology(struct net_device
*dev
)
10751 struct phy_link_topology
*topo
= dev
->link_topo
;
10753 if (IS_ENABLED(CONFIG_PHYLIB
) && topo
) {
10754 xa_destroy(&topo
->phys
);
10756 dev
->link_topo
= NULL
;
10761 * register_netdevice() - register a network device
10762 * @dev: device to register
10764 * Take a prepared network device structure and make it externally accessible.
10765 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
10766 * Callers must hold the rtnl lock - you may want register_netdev()
10769 int register_netdevice(struct net_device
*dev
)
10772 struct net
*net
= dev_net(dev
);
10774 BUILD_BUG_ON(sizeof(netdev_features_t
) * BITS_PER_BYTE
<
10775 NETDEV_FEATURE_COUNT
);
10776 BUG_ON(dev_boot_phase
);
10781 /* When net_device's are persistent, this will be fatal. */
10782 BUG_ON(dev
->reg_state
!= NETREG_UNINITIALIZED
);
10785 ret
= ethtool_check_ops(dev
->ethtool_ops
);
10789 /* rss ctx ID 0 is reserved for the default context, start from 1 */
10790 xa_init_flags(&dev
->ethtool
->rss_ctx
, XA_FLAGS_ALLOC1
);
10791 mutex_init(&dev
->ethtool
->rss_lock
);
10793 spin_lock_init(&dev
->addr_list_lock
);
10794 netdev_set_addr_lockdep_class(dev
);
10796 ret
= dev_get_valid_name(net
, dev
, dev
->name
);
10801 dev
->name_node
= netdev_name_node_head_alloc(dev
);
10802 if (!dev
->name_node
)
10805 /* Init, if this function is available */
10806 if (dev
->netdev_ops
->ndo_init
) {
10807 ret
= dev
->netdev_ops
->ndo_init(dev
);
10811 goto err_free_name
;
10815 if (((dev
->hw_features
| dev
->features
) &
10816 NETIF_F_HW_VLAN_CTAG_FILTER
) &&
10817 (!dev
->netdev_ops
->ndo_vlan_rx_add_vid
||
10818 !dev
->netdev_ops
->ndo_vlan_rx_kill_vid
)) {
10819 netdev_WARN(dev
, "Buggy VLAN acceleration in driver!\n");
10824 ret
= netdev_do_alloc_pcpu_stats(dev
);
10828 ret
= dev_index_reserve(net
, dev
->ifindex
);
10830 goto err_free_pcpu
;
10831 dev
->ifindex
= ret
;
10833 /* Transfer changeable features to wanted_features and enable
10834 * software offloads (GSO and GRO).
10836 dev
->hw_features
|= (NETIF_F_SOFT_FEATURES
| NETIF_F_SOFT_FEATURES_OFF
);
10837 dev
->features
|= NETIF_F_SOFT_FEATURES
;
10839 if (dev
->udp_tunnel_nic_info
) {
10840 dev
->features
|= NETIF_F_RX_UDP_TUNNEL_PORT
;
10841 dev
->hw_features
|= NETIF_F_RX_UDP_TUNNEL_PORT
;
10844 dev
->wanted_features
= dev
->features
& dev
->hw_features
;
10846 if (!(dev
->flags
& IFF_LOOPBACK
))
10847 dev
->hw_features
|= NETIF_F_NOCACHE_COPY
;
10849 /* If IPv4 TCP segmentation offload is supported we should also
10850 * allow the device to enable segmenting the frame with the option
10851 * of ignoring a static IP ID value. This doesn't enable the
10852 * feature itself but allows the user to enable it later.
10854 if (dev
->hw_features
& NETIF_F_TSO
)
10855 dev
->hw_features
|= NETIF_F_TSO_MANGLEID
;
10856 if (dev
->vlan_features
& NETIF_F_TSO
)
10857 dev
->vlan_features
|= NETIF_F_TSO_MANGLEID
;
10858 if (dev
->mpls_features
& NETIF_F_TSO
)
10859 dev
->mpls_features
|= NETIF_F_TSO_MANGLEID
;
10860 if (dev
->hw_enc_features
& NETIF_F_TSO
)
10861 dev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
10863 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10865 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
10867 /* Make NETIF_F_SG inheritable to tunnel devices.
10869 dev
->hw_enc_features
|= NETIF_F_SG
| NETIF_F_GSO_PARTIAL
;
10871 /* Make NETIF_F_SG inheritable to MPLS.
10873 dev
->mpls_features
|= NETIF_F_SG
;
10875 ret
= call_netdevice_notifiers(NETDEV_POST_INIT
, dev
);
10876 ret
= notifier_to_errno(ret
);
10878 goto err_ifindex_release
;
10880 ret
= netdev_register_kobject(dev
);
10883 WRITE_ONCE(dev
->reg_state
, ret
? NETREG_UNREGISTERED
: NETREG_REGISTERED
);
10884 netdev_unlock(dev
);
10887 goto err_uninit_notify
;
10889 __netdev_update_features(dev
);
10892 * Default initial state at registry is that the
10893 * device is present.
10896 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
10898 linkwatch_init_dev(dev
);
10900 dev_init_scheduler(dev
);
10902 netdev_hold(dev
, &dev
->dev_registered_tracker
, GFP_KERNEL
);
10903 list_netdevice(dev
);
10905 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
10907 /* If the device has permanent device address, driver should
10908 * set dev_addr and also addr_assign_type should be set to
10909 * NET_ADDR_PERM (default value).
10911 if (dev
->addr_assign_type
== NET_ADDR_PERM
)
10912 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
10914 /* Notify protocols, that a new device appeared. */
10915 ret
= call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
10916 ret
= notifier_to_errno(ret
);
10918 /* Expect explicit free_netdev() on failure */
10919 dev
->needs_free_netdev
= false;
10920 unregister_netdevice_queue(dev
, NULL
);
10924 * Prevent userspace races by waiting until the network
10925 * device is fully setup before sending notifications.
10927 if (!dev
->rtnl_link_ops
||
10928 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
10929 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U, GFP_KERNEL
, 0, NULL
);
10935 call_netdevice_notifiers(NETDEV_PRE_UNINIT
, dev
);
10936 err_ifindex_release
:
10937 dev_index_release(net
, dev
->ifindex
);
10939 netdev_do_free_pcpu_stats(dev
);
10941 if (dev
->netdev_ops
->ndo_uninit
)
10942 dev
->netdev_ops
->ndo_uninit(dev
);
10943 if (dev
->priv_destructor
)
10944 dev
->priv_destructor(dev
);
10946 netdev_name_node_free(dev
->name_node
);
10949 EXPORT_SYMBOL(register_netdevice
);
10951 /* Initialize the core of a dummy net device.
10952 * The setup steps dummy netdevs need which normal netdevs get by going
10953 * through register_netdevice().
10955 static void init_dummy_netdev(struct net_device
*dev
)
10957 /* make sure we BUG if trying to hit standard
10958 * register/unregister code path
10960 dev
->reg_state
= NETREG_DUMMY
;
10962 /* a dummy interface is started by default */
10963 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
10964 set_bit(__LINK_STATE_START
, &dev
->state
);
10966 /* Note : We dont allocate pcpu_refcnt for dummy devices,
10967 * because users of this 'device' dont need to change
10973 * register_netdev - register a network device
10974 * @dev: device to register
10976 * Take a completed network device structure and add it to the kernel
10977 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10978 * chain. 0 is returned on success. A negative errno code is returned
10979 * on a failure to set up the device, or if the name is a duplicate.
10981 * This is a wrapper around register_netdevice that takes the rtnl semaphore
10982 * and expands the device name if you passed a format string to
10985 int register_netdev(struct net_device
*dev
)
10987 struct net
*net
= dev_net(dev
);
10990 if (rtnl_net_lock_killable(net
))
10993 err
= register_netdevice(dev
);
10995 rtnl_net_unlock(net
);
10999 EXPORT_SYMBOL(register_netdev
);
11001 int netdev_refcnt_read(const struct net_device
*dev
)
11003 #ifdef CONFIG_PCPU_DEV_REFCNT
11006 for_each_possible_cpu(i
)
11007 refcnt
+= *per_cpu_ptr(dev
->pcpu_refcnt
, i
);
11010 return refcount_read(&dev
->dev_refcnt
);
11013 EXPORT_SYMBOL(netdev_refcnt_read
);
11015 int netdev_unregister_timeout_secs __read_mostly
= 10;
11017 #define WAIT_REFS_MIN_MSECS 1
11018 #define WAIT_REFS_MAX_MSECS 250
11020 * netdev_wait_allrefs_any - wait until all references are gone.
11021 * @list: list of net_devices to wait on
11023 * This is called when unregistering network devices.
11025 * Any protocol or device that holds a reference should register
11026 * for netdevice notification, and cleanup and put back the
11027 * reference if they receive an UNREGISTER event.
11028 * We can get stuck here if buggy protocols don't correctly
11031 static struct net_device
*netdev_wait_allrefs_any(struct list_head
*list
)
11033 unsigned long rebroadcast_time
, warning_time
;
11034 struct net_device
*dev
;
11037 rebroadcast_time
= warning_time
= jiffies
;
11039 list_for_each_entry(dev
, list
, todo_list
)
11040 if (netdev_refcnt_read(dev
) == 1)
11044 if (time_after(jiffies
, rebroadcast_time
+ 1 * HZ
)) {
11047 /* Rebroadcast unregister notification */
11048 list_for_each_entry(dev
, list
, todo_list
)
11049 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
11055 list_for_each_entry(dev
, list
, todo_list
)
11056 if (test_bit(__LINK_STATE_LINKWATCH_PENDING
,
11058 /* We must not have linkwatch events
11059 * pending on unregister. If this
11060 * happens, we simply run the queue
11061 * unscheduled, resulting in a noop
11064 linkwatch_run_queue();
11070 rebroadcast_time
= jiffies
;
11076 wait
= WAIT_REFS_MIN_MSECS
;
11079 wait
= min(wait
<< 1, WAIT_REFS_MAX_MSECS
);
11082 list_for_each_entry(dev
, list
, todo_list
)
11083 if (netdev_refcnt_read(dev
) == 1)
11086 if (time_after(jiffies
, warning_time
+
11087 READ_ONCE(netdev_unregister_timeout_secs
) * HZ
)) {
11088 list_for_each_entry(dev
, list
, todo_list
) {
11089 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
11090 dev
->name
, netdev_refcnt_read(dev
));
11091 ref_tracker_dir_print(&dev
->refcnt_tracker
, 10);
11094 warning_time
= jiffies
;
11099 /* The sequence is:
11103 * register_netdevice(x1);
11104 * register_netdevice(x2);
11106 * unregister_netdevice(y1);
11107 * unregister_netdevice(y2);
11113 * We are invoked by rtnl_unlock().
11114 * This allows us to deal with problems:
11115 * 1) We can delete sysfs objects which invoke hotplug
11116 * without deadlocking with linkwatch via keventd.
11117 * 2) Since we run with the RTNL semaphore not held, we can sleep
11118 * safely in order to wait for the netdev refcnt to drop to zero.
11120 * We must not return until all unregister events added during
11121 * the interval the lock was held have been completed.
11123 void netdev_run_todo(void)
11125 struct net_device
*dev
, *tmp
;
11126 struct list_head list
;
11128 #ifdef CONFIG_LOCKDEP
11129 struct list_head unlink_list
;
11131 list_replace_init(&net_unlink_list
, &unlink_list
);
11133 while (!list_empty(&unlink_list
)) {
11134 struct net_device
*dev
= list_first_entry(&unlink_list
,
11137 list_del_init(&dev
->unlink_list
);
11138 dev
->nested_level
= dev
->lower_level
- 1;
11142 /* Snapshot list, allow later requests */
11143 list_replace_init(todo_list(), &list
);
11147 /* Wait for rcu callbacks to finish before next phase */
11148 if (!list_empty(&list
))
11151 list_for_each_entry_safe(dev
, tmp
, &list
, todo_list
) {
11152 if (unlikely(dev
->reg_state
!= NETREG_UNREGISTERING
)) {
11153 netdev_WARN(dev
, "run_todo but not unregistering\n");
11154 list_del(&dev
->todo_list
);
11159 WRITE_ONCE(dev
->reg_state
, NETREG_UNREGISTERED
);
11160 netdev_unlock(dev
);
11161 linkwatch_sync_dev(dev
);
11165 while (!list_empty(&list
)) {
11166 dev
= netdev_wait_allrefs_any(&list
);
11167 list_del(&dev
->todo_list
);
11170 BUG_ON(netdev_refcnt_read(dev
) != 1);
11171 BUG_ON(!list_empty(&dev
->ptype_all
));
11172 BUG_ON(!list_empty(&dev
->ptype_specific
));
11173 WARN_ON(rcu_access_pointer(dev
->ip_ptr
));
11174 WARN_ON(rcu_access_pointer(dev
->ip6_ptr
));
11176 netdev_do_free_pcpu_stats(dev
);
11177 if (dev
->priv_destructor
)
11178 dev
->priv_destructor(dev
);
11179 if (dev
->needs_free_netdev
)
11184 /* Free network device */
11185 kobject_put(&dev
->dev
.kobj
);
11187 if (cnt
&& atomic_sub_and_test(cnt
, &dev_unreg_count
))
11188 wake_up(&netdev_unregistering_wq
);
11191 /* Collate per-cpu network dstats statistics
11193 * Read per-cpu network statistics from dev->dstats and populate the related
11196 static void dev_fetch_dstats(struct rtnl_link_stats64
*s
,
11197 const struct pcpu_dstats __percpu
*dstats
)
11201 for_each_possible_cpu(cpu
) {
11202 u64 rx_packets
, rx_bytes
, rx_drops
;
11203 u64 tx_packets
, tx_bytes
, tx_drops
;
11204 const struct pcpu_dstats
*stats
;
11205 unsigned int start
;
11207 stats
= per_cpu_ptr(dstats
, cpu
);
11209 start
= u64_stats_fetch_begin(&stats
->syncp
);
11210 rx_packets
= u64_stats_read(&stats
->rx_packets
);
11211 rx_bytes
= u64_stats_read(&stats
->rx_bytes
);
11212 rx_drops
= u64_stats_read(&stats
->rx_drops
);
11213 tx_packets
= u64_stats_read(&stats
->tx_packets
);
11214 tx_bytes
= u64_stats_read(&stats
->tx_bytes
);
11215 tx_drops
= u64_stats_read(&stats
->tx_drops
);
11216 } while (u64_stats_fetch_retry(&stats
->syncp
, start
));
11218 s
->rx_packets
+= rx_packets
;
11219 s
->rx_bytes
+= rx_bytes
;
11220 s
->rx_dropped
+= rx_drops
;
11221 s
->tx_packets
+= tx_packets
;
11222 s
->tx_bytes
+= tx_bytes
;
11223 s
->tx_dropped
+= tx_drops
;
11227 /* ndo_get_stats64 implementation for dtstats-based accounting.
11229 * Populate @s from dev->stats and dev->dstats. This is used internally by the
11230 * core for NETDEV_PCPU_STAT_DSTAT-type stats collection.
11232 static void dev_get_dstats64(const struct net_device
*dev
,
11233 struct rtnl_link_stats64
*s
)
11235 netdev_stats_to_stats64(s
, &dev
->stats
);
11236 dev_fetch_dstats(s
, dev
->dstats
);
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = (unsigned long)atomic_long_read(&src[i]);
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
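/* The cast-based copy above relies on net_device_stats being laid out as an
 * array of atomic_long_t-sized counters that prefix rtnl_link_stats64 field
 * for field; the BUILD_BUG_ON() keeps that assumption honest if either
 * structure grows out of step.
 */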
11260 static __cold
struct net_device_core_stats __percpu
*netdev_core_stats_alloc(
11261 struct net_device
*dev
)
11263 struct net_device_core_stats __percpu
*p
;
11265 p
= alloc_percpu_gfp(struct net_device_core_stats
,
11266 GFP_ATOMIC
| __GFP_NOWARN
);
11268 if (p
&& cmpxchg(&dev
->core_stats
, NULL
, p
))
11271 /* This READ_ONCE() pairs with the cmpxchg() above */
11272 return READ_ONCE(dev
->core_stats
);
11275 noinline
void netdev_core_stats_inc(struct net_device
*dev
, u32 offset
)
11277 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
11278 struct net_device_core_stats __percpu
*p
= READ_ONCE(dev
->core_stats
);
11279 unsigned long __percpu
*field
;
11281 if (unlikely(!p
)) {
11282 p
= netdev_core_stats_alloc(dev
);
11287 field
= (unsigned long __percpu
*)((void __percpu
*)p
+ offset
);
11288 this_cpu_inc(*field
);
11290 EXPORT_SYMBOL_GPL(netdev_core_stats_inc
);
11293 * dev_get_stats - get network device statistics
11294 * @dev: device to get statistics from
11295 * @storage: place to store stats
11297 * Get network statistics from device. Return @storage.
11298 * The device driver may provide its own method by setting
11299 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
11300 * otherwise the internal statistics structure is used.
11302 struct rtnl_link_stats64
*dev_get_stats(struct net_device
*dev
,
11303 struct rtnl_link_stats64
*storage
)
11305 const struct net_device_ops
*ops
= dev
->netdev_ops
;
11306 const struct net_device_core_stats __percpu
*p
;
11308 if (ops
->ndo_get_stats64
) {
11309 memset(storage
, 0, sizeof(*storage
));
11310 ops
->ndo_get_stats64(dev
, storage
);
11311 } else if (ops
->ndo_get_stats
) {
11312 netdev_stats_to_stats64(storage
, ops
->ndo_get_stats(dev
));
11313 } else if (dev
->pcpu_stat_type
== NETDEV_PCPU_STAT_TSTATS
) {
11314 dev_get_tstats64(dev
, storage
);
11315 } else if (dev
->pcpu_stat_type
== NETDEV_PCPU_STAT_DSTATS
) {
11316 dev_get_dstats64(dev
, storage
);
11318 netdev_stats_to_stats64(storage
, &dev
->stats
);
11321 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
11322 p
= READ_ONCE(dev
->core_stats
);
11324 const struct net_device_core_stats
*core_stats
;
11327 for_each_possible_cpu(i
) {
11328 core_stats
= per_cpu_ptr(p
, i
);
11329 storage
->rx_dropped
+= READ_ONCE(core_stats
->rx_dropped
);
11330 storage
->tx_dropped
+= READ_ONCE(core_stats
->tx_dropped
);
11331 storage
->rx_nohandler
+= READ_ONCE(core_stats
->rx_nohandler
);
11332 storage
->rx_otherhost_dropped
+= READ_ONCE(core_stats
->rx_otherhost_dropped
);
11337 EXPORT_SYMBOL(dev_get_stats
);
11340 * dev_fetch_sw_netstats - get per-cpu network device statistics
11341 * @s: place to store stats
11342 * @netstats: per-cpu network stats to read from
11344 * Read per-cpu network statistics and populate the related fields in @s.
11346 void dev_fetch_sw_netstats(struct rtnl_link_stats64
*s
,
11347 const struct pcpu_sw_netstats __percpu
*netstats
)
11351 for_each_possible_cpu(cpu
) {
11352 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
11353 const struct pcpu_sw_netstats
*stats
;
11354 unsigned int start
;
11356 stats
= per_cpu_ptr(netstats
, cpu
);
11358 start
= u64_stats_fetch_begin(&stats
->syncp
);
11359 rx_packets
= u64_stats_read(&stats
->rx_packets
);
11360 rx_bytes
= u64_stats_read(&stats
->rx_bytes
);
11361 tx_packets
= u64_stats_read(&stats
->tx_packets
);
11362 tx_bytes
= u64_stats_read(&stats
->tx_bytes
);
11363 } while (u64_stats_fetch_retry(&stats
->syncp
, start
));
11365 s
->rx_packets
+= rx_packets
;
11366 s
->rx_bytes
+= rx_bytes
;
11367 s
->tx_packets
+= tx_packets
;
11368 s
->tx_bytes
+= tx_bytes
;
11371 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats
);
/**
 * dev_get_tstats64 - ndo_get_stats64 implementation
 * @dev: device to get statistics from
 * @s: place to store stats
 *
 * Populate @s from dev->stats and dev->tstats. Can be used as
 * ndo_get_stats64() callback.
 */
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	dev_fetch_sw_netstats(s, dev->tstats);
}
EXPORT_SYMBOL_GPL(dev_get_tstats64);
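/* Illustrative sketch (not from this file): a driver relying on core tstats
 * accounting would typically set, at setup time,
 *
 *	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *
 * and either leave ndo_get_stats64 unset (dev_get_stats() above then falls
 * back to dev_get_tstats64() for it) or point ndo_get_stats64 directly at
 * dev_get_tstats64. "dev" here is the driver's own net_device.
 */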
11388 struct netdev_queue
*dev_ingress_queue_create(struct net_device
*dev
)
11390 struct netdev_queue
*queue
= dev_ingress_queue(dev
);
11392 #ifdef CONFIG_NET_CLS_ACT
11395 queue
= kzalloc(sizeof(*queue
), GFP_KERNEL
);
11398 netdev_init_one_queue(dev
, queue
, NULL
);
11399 RCU_INIT_POINTER(queue
->qdisc
, &noop_qdisc
);
11400 RCU_INIT_POINTER(queue
->qdisc_sleeping
, &noop_qdisc
);
11401 rcu_assign_pointer(dev
->ingress_queue
, queue
);
11406 static const struct ethtool_ops default_ethtool_ops
;
11408 void netdev_set_default_ethtool_ops(struct net_device
*dev
,
11409 const struct ethtool_ops
*ops
)
11411 if (dev
->ethtool_ops
== &default_ethtool_ops
)
11412 dev
->ethtool_ops
= ops
;
11414 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops
);
11417 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
11418 * @dev: netdev to enable the IRQ coalescing on
11420 * Sets a conservative default for SW IRQ coalescing. Users can use
11421 * sysfs attributes to override the default values.
11423 void netdev_sw_irq_coalesce_default_on(struct net_device
*dev
)
11425 WARN_ON(dev
->reg_state
== NETREG_REGISTERED
);
11427 if (!IS_ENABLED(CONFIG_PREEMPT_RT
)) {
11428 netdev_set_gro_flush_timeout(dev
, 20000);
11429 netdev_set_defer_hard_irqs(dev
, 1);
11432 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on
);
11435 * alloc_netdev_mqs - allocate network device
11436 * @sizeof_priv: size of private data to allocate space for
11437 * @name: device name format string
11438 * @name_assign_type: origin of device name
11439 * @setup: callback to initialize device
11440 * @txqs: the number of TX subqueues to allocate
11441 * @rxqs: the number of RX subqueues to allocate
11443 * Allocates a struct net_device with private data area for driver use
11444 * and performs basic initialization. Also allocates subqueue structs
11445 * for each queue on the device.
11447 struct net_device
*alloc_netdev_mqs(int sizeof_priv
, const char *name
,
11448 unsigned char name_assign_type
,
11449 void (*setup
)(struct net_device
*),
11450 unsigned int txqs
, unsigned int rxqs
)
11452 struct net_device
*dev
;
11453 size_t napi_config_sz
;
11454 unsigned int maxqs
;
11456 BUG_ON(strlen(name
) >= sizeof(dev
->name
));
11459 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
11464 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
11468 maxqs
= max(txqs
, rxqs
);
11470 dev
= kvzalloc(struct_size(dev
, priv
, sizeof_priv
),
11471 GFP_KERNEL_ACCOUNT
| __GFP_RETRY_MAYFAIL
);
11475 dev
->priv_len
= sizeof_priv
;
11477 ref_tracker_dir_init(&dev
->refcnt_tracker
, 128, name
);
11478 #ifdef CONFIG_PCPU_DEV_REFCNT
11479 dev
->pcpu_refcnt
= alloc_percpu(int);
11480 if (!dev
->pcpu_refcnt
)
11484 refcount_set(&dev
->dev_refcnt
, 1);
11487 if (dev_addr_init(dev
))
11493 dev_net_set(dev
, &init_net
);
11495 dev
->gso_max_size
= GSO_LEGACY_MAX_SIZE
;
11496 dev
->xdp_zc_max_segs
= 1;
11497 dev
->gso_max_segs
= GSO_MAX_SEGS
;
11498 dev
->gro_max_size
= GRO_LEGACY_MAX_SIZE
;
11499 dev
->gso_ipv4_max_size
= GSO_LEGACY_MAX_SIZE
;
11500 dev
->gro_ipv4_max_size
= GRO_LEGACY_MAX_SIZE
;
11501 dev
->tso_max_size
= TSO_LEGACY_MAX_SIZE
;
11502 dev
->tso_max_segs
= TSO_MAX_SEGS
;
11503 dev
->upper_level
= 1;
11504 dev
->lower_level
= 1;
11505 #ifdef CONFIG_LOCKDEP
11506 dev
->nested_level
= 0;
11507 INIT_LIST_HEAD(&dev
->unlink_list
);
11510 INIT_LIST_HEAD(&dev
->napi_list
);
11511 INIT_LIST_HEAD(&dev
->unreg_list
);
11512 INIT_LIST_HEAD(&dev
->close_list
);
11513 INIT_LIST_HEAD(&dev
->link_watch_list
);
11514 INIT_LIST_HEAD(&dev
->adj_list
.upper
);
11515 INIT_LIST_HEAD(&dev
->adj_list
.lower
);
11516 INIT_LIST_HEAD(&dev
->ptype_all
);
11517 INIT_LIST_HEAD(&dev
->ptype_specific
);
11518 INIT_LIST_HEAD(&dev
->net_notifier_list
);
11519 #ifdef CONFIG_NET_SCHED
11520 hash_init(dev
->qdisc_hash
);
11523 mutex_init(&dev
->lock
);
11525 dev
->priv_flags
= IFF_XMIT_DST_RELEASE
| IFF_XMIT_DST_RELEASE_PERM
;
11528 if (!dev
->tx_queue_len
) {
11529 dev
->priv_flags
|= IFF_NO_QUEUE
;
11530 dev
->tx_queue_len
= DEFAULT_TX_QUEUE_LEN
;
11533 dev
->num_tx_queues
= txqs
;
11534 dev
->real_num_tx_queues
= txqs
;
11535 if (netif_alloc_netdev_queues(dev
))
11538 dev
->num_rx_queues
= rxqs
;
11539 dev
->real_num_rx_queues
= rxqs
;
11540 if (netif_alloc_rx_queues(dev
))
11542 dev
->ethtool
= kzalloc(sizeof(*dev
->ethtool
), GFP_KERNEL_ACCOUNT
);
11546 dev
->cfg
= kzalloc(sizeof(*dev
->cfg
), GFP_KERNEL_ACCOUNT
);
11549 dev
->cfg_pending
= dev
->cfg
;
11551 napi_config_sz
= array_size(maxqs
, sizeof(*dev
->napi_config
));
11552 dev
->napi_config
= kvzalloc(napi_config_sz
, GFP_KERNEL_ACCOUNT
);
11553 if (!dev
->napi_config
)
11556 strscpy(dev
->name
, name
);
11557 dev
->name_assign_type
= name_assign_type
;
11558 dev
->group
= INIT_NETDEV_GROUP
;
11559 if (!dev
->ethtool_ops
)
11560 dev
->ethtool_ops
= &default_ethtool_ops
;
11562 nf_hook_netdev_init(dev
);
11571 #ifdef CONFIG_PCPU_DEV_REFCNT
11572 free_percpu(dev
->pcpu_refcnt
);
11578 EXPORT_SYMBOL(alloc_netdev_mqs
);
11580 static void netdev_napi_exit(struct net_device
*dev
)
11582 if (!list_empty(&dev
->napi_list
)) {
11583 struct napi_struct
*p
, *n
;
11586 list_for_each_entry_safe(p
, n
, &dev
->napi_list
, dev_list
)
11587 __netif_napi_del_locked(p
);
11588 netdev_unlock(dev
);
11593 kvfree(dev
->napi_config
);
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released. If this
 *	is the last reference then it will be freed. Must be called in process
 *	context.
 */
void free_netdev(struct net_device *dev)
{
	might_sleep();

	/* When called immediately after register_netdevice() failed the unwind
	 * handling may still be dismantling the device. Handle that case by
	 * deferring the free.
	 */
	if (dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();
		dev->needs_free_netdev = true;
		return;
	}

	WARN_ON(dev->cfg != dev->cfg_pending);
	kfree(dev->cfg);
	kfree(dev->ethtool);
	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	netdev_napi_exit(dev);

	ref_tracker_dir_exit(&dev->refcnt_tracker);
#ifdef CONFIG_PCPU_DEV_REFCNT
	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;
#endif
	free_percpu(dev->core_stats);
	dev->core_stats = NULL;
	free_percpu(dev->xdp_bulkq);
	dev->xdp_bulkq = NULL;

	netdev_free_phy_link_topology(dev);

	mutex_destroy(&dev->lock);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED ||
	    dev->reg_state == NETREG_DUMMY) {
		kvfree(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	WRITE_ONCE(dev->reg_state, NETREG_RELEASED);

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * alloc_netdev_dummy - Allocate and initialize a dummy net device.
 * @sizeof_priv: size of private data to allocate space for
 *
 * Return: the allocated net_device on success, NULL otherwise
 */
struct net_device *alloc_netdev_dummy(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
			    init_dummy_netdev);
}
EXPORT_SYMBOL_GPL(alloc_netdev_dummy);

/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (from_cleanup_net() || rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

static void netdev_rss_contexts_free(struct net_device *dev)
{
	struct ethtool_rxfh_context *ctx;
	unsigned long context;

	mutex_lock(&dev->ethtool->rss_lock);
	xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
		struct ethtool_rxfh_param rxfh;

		rxfh.indir = ethtool_rxfh_context_indir(ctx);
		rxfh.key = ethtool_rxfh_context_key(ctx);
		rxfh.hfunc = ctx->hfunc;
		rxfh.input_xfrm = ctx->input_xfrm;
		rxfh.rss_context = context;
		rxfh.rss_delete = true;

		xa_erase(&dev->ethtool->rss_ctx, context);
		if (dev->ethtool_ops->create_rxfh_context)
			dev->ethtool_ops->remove_rxfh_context(dev, ctx,
							      context, NULL);
		else
			dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
		kfree(ctx);
	}
	xa_destroy(&dev->ethtool->rss_ctx);
	mutex_unlock(&dev->ethtool->rss_lock);
}

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If @head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		unregister_netdevice_many(&single);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);
	int cnt = 0;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	if (list_empty(head))
		return;

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);
		netdev_lock(dev);
		WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
		netdev_unlock(dev);
	}

	rtnl_drop_if_cleanup_net();
	flush_all_backlogs();
	synchronize_net();
	rtnl_acquire_if_cleanup_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);
		dev_tcx_uninstall(dev);
		dev_xdp_uninstall(dev);
		bpf_dev_bound_netdev_unregister(dev);
		dev_dmabuf_uninstall(dev);

		netdev_offload_xstats_disable_all(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0,
						     portid, nlh);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		netdev_rss_contexts_free(dev);

		call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		mutex_destroy(&dev->ethtool->rss_lock);

		net_shaper_flush_netdev(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);

		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
	}

	rtnl_drop_if_cleanup_net();
	synchronize_net();
	rtnl_acquire_if_cleanup_net();

	list_for_each_entry(dev, head, unreg_list) {
		netdev_put(dev, &dev->dev_registered_tracker);
		net_set_todo(dev);
		cnt++;
	}
	atomic_add(cnt, &dev_unreg_count);

	list_del(head);
}

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: As most callers use a stack allocated list_head,
 *	we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	unregister_netdevice_many_notify(head, 0, NULL);
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	rtnl_net_lock(net);
	unregister_netdevice(dev);
	rtnl_net_unlock(net);
}
EXPORT_SYMBOL(unregister_netdev);

/**
 *	__dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *	@new_ifindex: If not zero, specifies device index in the target
 *		      namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex)
{
	struct netdev_name_node *name_node;
	struct net *net_old = dev_net(dev);
	char new_name[IFNAMSIZ] = {};
	int err, new_nsid;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->netns_local)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(net_old, net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (netdev_name_in_use(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
		if (err < 0)
			goto out;
	}
	/* Check that none of the altnames conflicts. */
	err = -EEXIST;
	netdev_for_each_altname(dev, name_node)
		if (netdev_name_in_use(net, name_node->name))
			goto out;

	/* Check that new_ifindex isn't used yet. */
	if (new_ifindex) {
		err = dev_index_reserve(net, new_ifindex);
		if (err < 0)
			goto out;
	} else {
		/* If there is an ifindex conflict assign a new one */
		err = dev_index_reserve(net, dev->ifindex);
		if (err == -EBUSY)
			err = dev_index_reserve(net, 0);
		if (err < 0)
			goto out;
		new_ifindex = err;
	}

	/*
	 * And now a mini version of register_netdevice/unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Move per-net netdevice notifiers that are following the netdevice */
	move_netdevice_notifiers_dev_net(dev, net);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	if (new_name[0]) {
		/* Rename the netdev to prepared name */
		write_seqlock_bh(&netdev_rename_lock);
		strscpy(dev->name, new_name, IFNAMSIZ);
		write_sequnlock_bh(&netdev_rename_lock);
	}

	/* Fixup kobjects */
	dev_set_uevent_suppress(&dev->dev, 1);
	err = device_rename(&dev->dev, dev->name);
	dev_set_uevent_suppress(&dev->dev, 0);
	WARN_ON(err);

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Adapt owner in case owning user namespace of target network
	 * namespace is different from the original one.
	 */
	err = netdev_change_owner(dev, net_old, net);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(__dev_change_net_namespace);

static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state &= NAPIF_STATE_THREADED;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	if (!use_backlog_threads()) {
#ifdef CONFIG_RPS
		remsd = oldsd->rps_ipi_list;
		oldsd->rps_ipi_list = NULL;
#endif
		/* send out pending IPI's on offline CPU */
		net_rps_send_ipi(remsd);
	}

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		rps_input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		rps_input_queue_head_incr(oldsd);
	}

	return 0;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

static struct hlist_head * __net_init netdev_create_hash(void)
{
	struct hlist_head *hash;
	int i;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * sizeof_field(struct napi_struct, gro_bitmask));

	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);

	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	xa_destroy(&net->dev_by_index);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit_net(struct net *net)
{
	struct netdev_name_node *name_node, *tmp;
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	ASSERT_RTNL();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->netns_local)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (netdev_name_in_use(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");

		netdev_for_each_altname_safe(dev, name_node, tmp)
			if (netdev_name_in_use(&init_net, name_node->name))
				__netdev_name_node_alt_destroy(name_node);

		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		default_device_exit_net(net);
		cond_resched();
	}

	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit_batch = default_device_exit_batch,
};

static void __init net_dev_struct_check(void)
{
	/* TX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
#ifdef CONFIG_XPS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);

	/* TXRX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);

	/* RX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
#ifdef CONFIG_NETPOLL
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 92);
}

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
#define SYSTEM_PERCPU_PAGE_POOL_SIZE	((1 << 20) / PAGE_SIZE)

static int net_page_pool_create(int cpuid)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	struct page_pool_params page_pool_params = {
		.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
		.flags = PP_FLAG_SYSTEM_POOL,
		.nid = cpu_to_mem(cpuid),
	};
	struct page_pool *pp_ptr;
	int err;

	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
	if (IS_ERR(pp_ptr))
		return -ENOMEM;

	err = xdp_reg_page_pool(pp_ptr);
	if (err) {
		page_pool_destroy(pp_ptr);
		return err;
	}

	per_cpu(system_page_pool, cpuid) = pp_ptr;
#endif
	return 0;
}

static int backlog_napi_should_run(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
}

static void run_backlog_napi(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);

	napi_threaded_poll_loop(&sd->backlog);
}

static void backlog_napi_setup(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	napi->thread = this_cpu_read(backlog_napi);
	set_bit(NAPI_STATE_THREADED, &napi->state);
}

static struct smp_hotplug_thread backlog_threads = {
	.store			= &backlog_napi,
	.thread_should_run	= backlog_napi_should_run,
	.thread_fn		= run_backlog_napi,
	.thread_comm		= "backlog_napi/%u",
	.setup			= backlog_napi_setup,
};

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	net_dev_struct_check();

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	flush_backlogs_fallback = flush_backlogs_alloc();
	if (!flush_backlogs_fallback)
		goto out;

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
		spin_lock_init(&sd->defer_lock);

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		INIT_LIST_HEAD(&sd->backlog.poll_list);

		if (net_page_pool_create(i))
			goto out;
	}
	if (use_backlog_threads())
		smpboot_register_percpu_thread(&backlog_threads);

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices - the first device that appears and the last
	 * network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;

	/* avoid static key IPIs to isolated CPUs */
	if (housekeeping_enabled(HK_TYPE_MISC))
		net_enable_timestamp();
out:
	if (rc < 0) {
		for_each_possible_cpu(i) {
			struct page_pool *pp_ptr;

			pp_ptr = per_cpu(system_page_pool, i);
			if (!pp_ptr)
				continue;

			xdp_unreg_page_pool(pp_ptr);
			page_pool_destroy(pp_ptr);
			per_cpu(system_page_pool, i) = NULL;
		}
	}

	return rc;
}

subsys_initcall(net_dev_init);