// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <linux/phy_link_topology.h>

#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	unsigned int val = net->dev_base_seq + 1;

	WRITE_ONCE(net->dev_base_seq, val ?: 1);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

#ifndef CONFIG_PREEMPT_RT

static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);

static int __init setup_backlog_napi_threads(char *arg)
{
	static_branch_enable(&use_backlog_threads_key);
	return 0;
}
early_param("thread_backlog_napi", setup_backlog_napi_threads);

static bool use_backlog_threads(void)
{
	return static_branch_unlikely(&use_backlog_threads_key);
}

#else

static bool use_backlog_threads(void)
{
	return true;
}

#endif

static inline void backlog_lock_irq_save(struct softnet_data *sd,
					 unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_save(*flags);
}

static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else
		local_irq_disable();
}

static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
					      unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_restore(*flags);
}

static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else
		local_irq_enable();
}
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail_rcu(&name_node->list, &dev->name_node->list);

	return 0;
}

static void netdev_name_node_alt_free(struct rcu_head *head)
{
	struct netdev_name_node *name_node =
		container_of(head, struct netdev_name_node, rcu);

	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	netdev_name_node_del(name_node);
	list_del(&name_node->list);
	call_rcu(&name_node->rcu, netdev_name_node_alt_free);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
		list_del(&name_node->list);
		netdev_name_node_alt_free(&name_node->rcu);
	}
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_add(net, name_node);

	/* We reserved the ifindex, this can't fail */
	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	xa_erase(&net->dev_by_index, dev->ifindex);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_del(name_node);

	/* Unlink dev from the device chain */
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
	.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
};
EXPORT_PER_CPU_SYMBOL(softnet_data);

/* Page_pool has a lockless array/stack to alloc/recycle pages.
 * PP consumers must pay attention to run APIs in the appropriate context
 * (e.g. NAPI context).
 */
static DEFINE_PER_CPU(struct page_pool *, system_page_pool);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/*******************************************************************************
 *
 *			Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of a interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
	};
	struct net_device_path *path;
	int ret = 0;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 *	netdev_get_by_name() - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *	@tracker: tracking object for the acquired reference
 *	@gfp: allocation flags for the tracker
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use netdev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_name(net, name);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_name);

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RTNL semaphore.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_by_index() - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *	@tracker: tracking object for the acquired reference
 *	@gfp: allocation flags for the tracker
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	netdev_put() to indicate they have finished with it.
 */
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

static DEFINE_SEQLOCK(netdev_rename_lock);

void netdev_copy_name(struct net_device *dev, char *name)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&netdev_rename_lock);
		strscpy(name, dev->name, IFNAMSIZ);
	} while (read_seqretry(&netdev_rename_lock, seq));
}

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	netdev_copy_name(dev, name);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@res: result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *res)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;
	char buf[IFNAMSIZ];

	/* Verify the string as this thing may have come from the user.
	 * There must be one "%d" and no other "%" characters.
	 */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	/* Use one page as a bit array of possible slots */
	inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
	if (!inuse)
		return -ENOMEM;

	for_each_netdev(net, d) {
		struct netdev_name_node *name_node;

		netdev_for_each_altname(d, name_node) {
			if (!sscanf(name_node->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, name_node->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}
		if (!sscanf(d->name, name, &i))
			continue;
		if (i < 0 || i >= max_netdevices)
			continue;

		/* avoid cases where sscanf is not exact inverse of printf */
		snprintf(buf, IFNAMSIZ, name, i);
		if (!strncmp(buf, d->name, IFNAMSIZ))
			__set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, max_netdevices);
	bitmap_free(inuse);
	if (i == max_netdevices)
		return -ENFILE;

	/* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
	strscpy(buf, name, IFNAMSIZ);
	snprintf(res, IFNAMSIZ, buf, i);
	return i;
}

/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
			       const char *want_name, char *out_name,
			       int dup_errno)
{
	if (!dev_valid_name(want_name))
		return -EINVAL;

	if (strchr(want_name, '%'))
		return __dev_alloc_name(net, want_name, out_name);

	if (netdev_name_in_use(net, want_name))
		return -dup_errno;
	if (out_name != want_name)
		strscpy(out_name, want_name, IFNAMSIZ);
	return 0;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
}
EXPORT_SYMBOL(dev_alloc_name);
static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	int ret;

	ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
	return ret < 0 ? ret : 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	write_seqlock_bh(&netdev_rename_lock);
	err = dev_get_valid_name(net, dev, newname);
	write_sequnlock_bh(&netdev_rename_lock);

	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s%s\n", oldname,
			    dev->flags & IFF_UP ? " (while UP)" : "");

	old_assign_type = dev->name_assign_type;
	WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		WRITE_ONCE(dev->name_assign_type, old_assign_type);
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	netdev_name_node_del(dev->name_node);

	synchronize_net();

	netdev_name_node_add(net, dev->name_node);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			write_seqlock_bh(&netdev_rename_lock);
			memcpy(dev->name, oldname, IFNAMSIZ);
			write_sequnlock_bh(&netdev_rename_lock);
			memcpy(oldname, newname, IFNAMSIZ);
			WRITE_ONCE(dev->name_assign_type, old_assign_type);
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked into the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{
	int err;

	err = raw_notifier_chain_register(&net->netdev_chain, nb);
	if (err)
		return err;
	if (dev_boot_phase)
		return 0;

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

	return 0;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);
	return err;
}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{
	int err;

	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
	if (err)
		return err;

	call_netdevice_unregister_net_notifiers(nb, net);
	return 0;
}

/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);

/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);

static void __move_netdevice_notifier_net(struct net *src_net,
					  struct net *dst_net,
					  struct notifier_block *nb)
{
	__unregister_netdevice_notifier_net(src_net, nb);
	__register_netdevice_notifier_net(dst_net, nb, true);
}

int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
	if (!err) {
		nn->nb = nb;
		list_add(&nn->list, &dev->net_notifier_list);
	}
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);

static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list)
		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
}
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
	 */
	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
	if (ret & NOTIFY_STOP_MASK)
		return ret;
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
 *	                                       for and rollback on error
 *	@val_up: value passed unmodified to notifier function
 *	@val_down: value passed unmodified to the notifier function when
 *	           recovering from an error on @val_up
 *	@info: notifier information data
 *
 *	Call all per-netns network notifier blocks, but not notifier blocks on
 *	the global notifier chain. Parameters and return value are as for
 *	raw_notifier_call_chain_robust().
 */

static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
				     unsigned long val_down,
				     struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);

	ASSERT_RTNL();

	return raw_notifier_call_chain_robust(&net->netdev_chain,
					      val_up, val_down, info);
}

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{
	struct netdev_notifier_info_ext info = {
		.info.dev = dev,
		.ext.mtu = arg,
	};

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
EXPORT_SYMBOL(tcf_bypass_check_needed_key);
#endif

DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted = atomic_read(&netstamp_wanted);

	while (wanted > 0) {
		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted = atomic_read(&netstamp_wanted);

	while (wanted > 1) {
		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
2164 static inline void net_timestamp_set(struct sk_buff
*skb
)
2167 skb
->tstamp_type
= SKB_CLOCK_REALTIME
;
2168 if (static_branch_unlikely(&netstamp_needed_key
))
2169 skb
->tstamp
= ktime_get_real();
2172 #define net_timestamp_check(COND, SKB) \
2173 if (static_branch_unlikely(&netstamp_needed_key)) { \
2174 if ((COND) && !(SKB)->tstamp) \
2175 (SKB)->tstamp = ktime_get_real(); \
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	return __is_skb_forwardable(dev, skb, true);
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
			      bool check_mtu)
{
	int ret = ____dev_forward_skb(dev, skb, check_mtu);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb2(dev, skb, true);
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * Return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
}
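
/* Illustrative sketch (not part of this file): a veth-style driver can hand a
 * transmitted skb to its peer with dev_forward_skb().  The peer lookup helper
 * (example_get_peer) is hypothetical.
 */
#if 0
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

	/* dev_forward_skb() scrubs the skb for the peer's namespace and
	 * injects it into the peer's receive path.
	 */
	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif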
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&net_hotdata.ptype_all) ||
	       !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct list_head *ptype_list = &net_hotdata.ptype_all;
	struct packet_type *ptype, *pt_prev = NULL;
	struct sk_buff *skb2 = NULL;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (READ_ONCE(ptype->ignore_outgoing))
			continue;

		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &net_hotdata.ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid, nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				    i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		/* walk through the TCs and see if it falls into any of them */
		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		/* didn't find it, just return -1 to indicate no match */
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
2414 static struct static_key xps_needed __read_mostly
;
2415 static struct static_key xps_rxqs_needed __read_mostly
;
2416 static DEFINE_MUTEX(xps_map_mutex
);
2417 #define xmap_dereference(P) \
2418 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2420 static bool remove_xps_queue(struct xps_dev_maps
*dev_maps
,
2421 struct xps_dev_maps
*old_maps
, int tci
, u16 index
)
2423 struct xps_map
*map
= NULL
;
2426 map
= xmap_dereference(dev_maps
->attr_map
[tci
]);
2430 for (pos
= map
->len
; pos
--;) {
2431 if (map
->queues
[pos
] != index
)
2435 map
->queues
[pos
] = map
->queues
[--map
->len
];
2440 RCU_INIT_POINTER(old_maps
->attr_map
[tci
], NULL
);
2441 RCU_INIT_POINTER(dev_maps
->attr_map
[tci
], NULL
);
2442 kfree_rcu(map
, rcu
);
2449 static bool remove_xps_queue_cpu(struct net_device
*dev
,
2450 struct xps_dev_maps
*dev_maps
,
2451 int cpu
, u16 offset
, u16 count
)
2453 int num_tc
= dev_maps
->num_tc
;
2454 bool active
= false;
2457 for (tci
= cpu
* num_tc
; num_tc
--; tci
++) {
2460 for (i
= count
, j
= offset
; i
--; j
++) {
2461 if (!remove_xps_queue(dev_maps
, NULL
, tci
, j
))
2471 static void reset_xps_maps(struct net_device
*dev
,
2472 struct xps_dev_maps
*dev_maps
,
2473 enum xps_map_type type
)
2475 static_key_slow_dec_cpuslocked(&xps_needed
);
2476 if (type
== XPS_RXQS
)
2477 static_key_slow_dec_cpuslocked(&xps_rxqs_needed
);
2479 RCU_INIT_POINTER(dev
->xps_maps
[type
], NULL
);
2481 kfree_rcu(dev_maps
, rcu
);
2484 static void clean_xps_maps(struct net_device
*dev
, enum xps_map_type type
,
2485 u16 offset
, u16 count
)
2487 struct xps_dev_maps
*dev_maps
;
2488 bool active
= false;
2491 dev_maps
= xmap_dereference(dev
->xps_maps
[type
]);
2495 for (j
= 0; j
< dev_maps
->nr_ids
; j
++)
2496 active
|= remove_xps_queue_cpu(dev
, dev_maps
, j
, offset
, count
);
2498 reset_xps_maps(dev
, dev_maps
, type
);
2500 if (type
== XPS_CPUS
) {
2501 for (i
= offset
+ (count
- 1); count
--; i
--)
2502 netdev_queue_numa_node_write(
2503 netdev_get_tx_queue(dev
, i
), NUMA_NO_NODE
);
2507 static void netif_reset_xps_queues(struct net_device
*dev
, u16 offset
,
2510 if (!static_key_false(&xps_needed
))
2514 mutex_lock(&xps_map_mutex
);
2516 if (static_key_false(&xps_rxqs_needed
))
2517 clean_xps_maps(dev
, XPS_RXQS
, offset
, count
);
2519 clean_xps_maps(dev
, XPS_CPUS
, offset
, count
);
2521 mutex_unlock(&xps_map_mutex
);
2525 static void netif_reset_xps_queues_gt(struct net_device
*dev
, u16 index
)
2527 netif_reset_xps_queues(dev
, index
, dev
->num_tx_queues
- index
);
2530 static struct xps_map
*expand_xps_map(struct xps_map
*map
, int attr_index
,
2531 u16 index
, bool is_rxqs_map
)
2533 struct xps_map
*new_map
;
2534 int alloc_len
= XPS_MIN_MAP_ALLOC
;
2537 for (pos
= 0; map
&& pos
< map
->len
; pos
++) {
2538 if (map
->queues
[pos
] != index
)
2543 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2545 if (pos
< map
->alloc_len
)
2548 alloc_len
= map
->alloc_len
* 2;
2551 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2555 new_map
= kzalloc(XPS_MAP_SIZE(alloc_len
), GFP_KERNEL
);
2557 new_map
= kzalloc_node(XPS_MAP_SIZE(alloc_len
), GFP_KERNEL
,
2558 cpu_to_node(attr_index
));
2562 for (i
= 0; i
< pos
; i
++)
2563 new_map
->queues
[i
] = map
->queues
[i
];
2564 new_map
->alloc_len
= alloc_len
;
2570 /* Copy xps maps at a given index */
2571 static void xps_copy_dev_maps(struct xps_dev_maps
*dev_maps
,
2572 struct xps_dev_maps
*new_dev_maps
, int index
,
2573 int tc
, bool skip_tc
)
2575 int i
, tci
= index
* dev_maps
->num_tc
;
2576 struct xps_map
*map
;
2578 /* copy maps belonging to foreign traffic classes */
2579 for (i
= 0; i
< dev_maps
->num_tc
; i
++, tci
++) {
2580 if (i
== tc
&& skip_tc
)
2583 /* fill in the new device map from the old device map */
2584 map
= xmap_dereference(dev_maps
->attr_map
[tci
]);
2585 RCU_INIT_POINTER(new_dev_maps
->attr_map
[tci
], map
);
2589 /* Must be called under cpus_read_lock */
2590 int __netif_set_xps_queue(struct net_device
*dev
, const unsigned long *mask
,
2591 u16 index
, enum xps_map_type type
)
2593 struct xps_dev_maps
*dev_maps
, *new_dev_maps
= NULL
, *old_dev_maps
= NULL
;
2594 const unsigned long *online_mask
= NULL
;
2595 bool active
= false, copy
= false;
2596 int i
, j
, tci
, numa_node_id
= -2;
2597 int maps_sz
, num_tc
= 1, tc
= 0;
2598 struct xps_map
*map
, *new_map
;
2599 unsigned int nr_ids
;
2601 WARN_ON_ONCE(index
>= dev
->num_tx_queues
);
2604 /* Do not allow XPS on subordinate device directly */
2605 num_tc
= dev
->num_tc
;
2609 /* If queue belongs to subordinate dev use its map */
2610 dev
= netdev_get_tx_queue(dev
, index
)->sb_dev
? : dev
;
2612 tc
= netdev_txq_to_tc(dev
, index
);
2617 mutex_lock(&xps_map_mutex
);
2619 dev_maps
= xmap_dereference(dev
->xps_maps
[type
]);
2620 if (type
== XPS_RXQS
) {
2621 maps_sz
= XPS_RXQ_DEV_MAPS_SIZE(num_tc
, dev
->num_rx_queues
);
2622 nr_ids
= dev
->num_rx_queues
;
2624 maps_sz
= XPS_CPU_DEV_MAPS_SIZE(num_tc
);
2625 if (num_possible_cpus() > 1)
2626 online_mask
= cpumask_bits(cpu_online_mask
);
2627 nr_ids
= nr_cpu_ids
;
2630 if (maps_sz
< L1_CACHE_BYTES
)
2631 maps_sz
= L1_CACHE_BYTES
;
2633 /* The old dev_maps could be larger or smaller than the one we're
2634 * setting up now, as dev->num_tc or nr_ids could have been updated in
2635 * between. We could try to be smart, but let's be safe instead and only
2636 * copy foreign traffic classes if the two map sizes match.
2639 dev_maps
->num_tc
== num_tc
&& dev_maps
->nr_ids
== nr_ids
)
2642 /* allocate memory for queue storage */
2643 for (j
= -1; j
= netif_attrmask_next_and(j
, online_mask
, mask
, nr_ids
),
2645 if (!new_dev_maps
) {
2646 new_dev_maps
= kzalloc(maps_sz
, GFP_KERNEL
);
2647 if (!new_dev_maps
) {
2648 mutex_unlock(&xps_map_mutex
);
2652 new_dev_maps
->nr_ids
= nr_ids
;
2653 new_dev_maps
->num_tc
= num_tc
;
2656 tci
= j
* num_tc
+ tc
;
2657 map
= copy
? xmap_dereference(dev_maps
->attr_map
[tci
]) : NULL
;
2659 map
= expand_xps_map(map
, j
, index
, type
== XPS_RXQS
);
2663 RCU_INIT_POINTER(new_dev_maps
->attr_map
[tci
], map
);
2667 goto out_no_new_maps
;
2670 /* Increment static keys at most once per type */
2671 static_key_slow_inc_cpuslocked(&xps_needed
);
2672 if (type
== XPS_RXQS
)
2673 static_key_slow_inc_cpuslocked(&xps_rxqs_needed
);
2676 for (j
= 0; j
< nr_ids
; j
++) {
2677 bool skip_tc
= false;
2679 tci
= j
* num_tc
+ tc
;
2680 if (netif_attr_test_mask(j
, mask
, nr_ids
) &&
2681 netif_attr_test_online(j
, online_mask
, nr_ids
)) {
2682 /* add tx-queue to CPU/rx-queue maps */
2687 map
= xmap_dereference(new_dev_maps
->attr_map
[tci
]);
2688 while ((pos
< map
->len
) && (map
->queues
[pos
] != index
))
2691 if (pos
== map
->len
)
2692 map
->queues
[map
->len
++] = index
;
2694 if (type
== XPS_CPUS
) {
2695 if (numa_node_id
== -2)
2696 numa_node_id
= cpu_to_node(j
);
2697 else if (numa_node_id
!= cpu_to_node(j
))
2704 xps_copy_dev_maps(dev_maps
, new_dev_maps
, j
, tc
,
2708 rcu_assign_pointer(dev
->xps_maps
[type
], new_dev_maps
);
2710 /* Cleanup old maps */
2712 goto out_no_old_maps
;
2714 for (j
= 0; j
< dev_maps
->nr_ids
; j
++) {
2715 for (i
= num_tc
, tci
= j
* dev_maps
->num_tc
; i
--; tci
++) {
2716 map
= xmap_dereference(dev_maps
->attr_map
[tci
]);
2721 new_map
= xmap_dereference(new_dev_maps
->attr_map
[tci
]);
2726 RCU_INIT_POINTER(dev_maps
->attr_map
[tci
], NULL
);
2727 kfree_rcu(map
, rcu
);
2731 old_dev_maps
= dev_maps
;
2734 dev_maps
= new_dev_maps
;
2738 if (type
== XPS_CPUS
)
2739 /* update Tx queue numa node */
2740 netdev_queue_numa_node_write(netdev_get_tx_queue(dev
, index
),
2741 (numa_node_id
>= 0) ?
2742 numa_node_id
: NUMA_NO_NODE
);
2747 /* removes tx-queue from unused CPUs/rx-queues */
2748 for (j
= 0; j
< dev_maps
->nr_ids
; j
++) {
2749 tci
= j
* dev_maps
->num_tc
;
2751 for (i
= 0; i
< dev_maps
->num_tc
; i
++, tci
++) {
2753 netif_attr_test_mask(j
, mask
, dev_maps
->nr_ids
) &&
2754 netif_attr_test_online(j
, online_mask
, dev_maps
->nr_ids
))
2757 active
|= remove_xps_queue(dev_maps
,
2758 copy
? old_dev_maps
: NULL
,
2764 kfree_rcu(old_dev_maps
, rcu
);
2766 /* free map if not active */
2768 reset_xps_maps(dev
, dev_maps
, type
);
2771 mutex_unlock(&xps_map_mutex
);
2775 /* remove any maps that we added */
2776 for (j
= 0; j
< nr_ids
; j
++) {
2777 for (i
= num_tc
, tci
= j
* num_tc
; i
--; tci
++) {
2778 new_map
= xmap_dereference(new_dev_maps
->attr_map
[tci
]);
2780 xmap_dereference(dev_maps
->attr_map
[tci
]) :
2782 if (new_map
&& new_map
!= map
)
2787 mutex_unlock(&xps_map_mutex
);
2789 kfree(new_dev_maps
);
2792 EXPORT_SYMBOL_GPL(__netif_set_xps_queue
);
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	int ret;

	cpus_read_lock();
	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);
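
/* Illustrative sketch (not part of this file): a driver could steer
 * transmissions on TX queue 0 to CPUs 0-3 via netif_set_xps_queue().
 * The helper name is hypothetical.
 */
#if 0
static void example_setup_xps(struct net_device *dev)
{
	cpumask_var_t mask;
	int cpu;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	for (cpu = 0; cpu < 4 && cpu < nr_cpu_ids; cpu++)
		cpumask_set_cpu(cpu, mask);

	/* Map TX queue index 0 to the chosen CPU set. */
	netif_set_xps_queue(dev, mask, 0);
	free_cpumask_var(mask);
}
#endif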
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	/* Unbind any subordinate channels */
	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev)
			netdev_unbind_sb_channel(dev, txq->sb_dev);
	}
}

void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
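
/* Illustrative sketch (not part of this file): typical driver-side use of the
 * tc helpers above - declare two traffic classes, give each a queue range,
 * and map priorities onto them.  The helper name is hypothetical.
 */
#if 0
static int example_setup_tc(struct net_device *dev)
{
	int prio, err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	/* TC0 uses queues 0-3, TC1 uses queues 4-7. */
	netdev_set_tc_queue(dev, 0, 4, 0);
	netdev_set_tc_queue(dev, 1, 4, 4);

	/* Priorities 0-3 map to TC0, 4-7 to TC1. */
	for (prio = 4; prio <= 7; prio++)
		netdev_set_prio_tc_map(dev, prio, 1);
	return 0;
}
#endif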
2862 void netdev_unbind_sb_channel(struct net_device
*dev
,
2863 struct net_device
*sb_dev
)
2865 struct netdev_queue
*txq
= &dev
->_tx
[dev
->num_tx_queues
];
2868 netif_reset_xps_queues_gt(sb_dev
, 0);
2870 memset(sb_dev
->tc_to_txq
, 0, sizeof(sb_dev
->tc_to_txq
));
2871 memset(sb_dev
->prio_tc_map
, 0, sizeof(sb_dev
->prio_tc_map
));
2873 while (txq
-- != &dev
->_tx
[0]) {
2874 if (txq
->sb_dev
== sb_dev
)
2878 EXPORT_SYMBOL(netdev_unbind_sb_channel
);
2880 int netdev_bind_sb_channel_queue(struct net_device
*dev
,
2881 struct net_device
*sb_dev
,
2882 u8 tc
, u16 count
, u16 offset
)
2884 /* Make certain the sb_dev and dev are already configured */
2885 if (sb_dev
->num_tc
>= 0 || tc
>= dev
->num_tc
)
2888 /* We cannot hand out queues we don't have */
2889 if ((offset
+ count
) > dev
->real_num_tx_queues
)
2892 /* Record the mapping */
2893 sb_dev
->tc_to_txq
[tc
].count
= count
;
2894 sb_dev
->tc_to_txq
[tc
].offset
= offset
;
2896 /* Provide a way for Tx queue to find the tc_to_txq map or
2897 * XPS map for itself.
2900 netdev_get_tx_queue(dev
, count
+ offset
)->sb_dev
= sb_dev
;
2904 EXPORT_SYMBOL(netdev_bind_sb_channel_queue
);
2906 int netdev_set_sb_channel(struct net_device
*dev
, u16 channel
)
2908 /* Do not use a multiqueue device to represent a subordinate channel */
2909 if (netif_is_multiqueue(dev
))
2912 /* We allow channels 1 - 32767 to be used for subordinate channels.
2913 * Channel 0 is meant to be "native" mode and used only to represent
2914 * the main root device. We allow writing 0 to reset the device back
2915 * to normal mode after being used as a subordinate channel.
2917 if (channel
> S16_MAX
)
2920 dev
->num_tc
= -channel
;
2924 EXPORT_SYMBOL(netdev_set_sb_channel
);
2927 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2928 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
2930 int netif_set_real_num_tx_queues(struct net_device
*dev
, unsigned int txq
)
2935 disabling
= txq
< dev
->real_num_tx_queues
;
2937 if (txq
< 1 || txq
> dev
->num_tx_queues
)
2940 if (dev
->reg_state
== NETREG_REGISTERED
||
2941 dev
->reg_state
== NETREG_UNREGISTERING
) {
2944 rc
= netdev_queue_update_kobjects(dev
, dev
->real_num_tx_queues
,
2950 netif_setup_tc(dev
, txq
);
2952 dev_qdisc_change_real_num_tx(dev
, txq
);
2954 dev
->real_num_tx_queues
= txq
;
2958 qdisc_reset_all_tx_gt(dev
, txq
);
2960 netif_reset_xps_queues_gt(dev
, txq
);
2964 dev
->real_num_tx_queues
= txq
;
2969 EXPORT_SYMBOL(netif_set_real_num_tx_queues
);
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device.  Returns 0 on success, or a
 * negative error code.  If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);

/**
 * netif_set_real_num_queues - set actual number of RX and TX queues used
 * @dev: Network device
 * @txq: Actual number of TX queues
 * @rxq: Actual number of RX queues
 *
 * Set the real number of both TX and RX queues.
 * Does nothing if the number of queues is already correct.
 */
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq)
{
	unsigned int old_rxq = dev->real_num_rx_queues;
	int err;

	if (txq < 1 || txq > dev->num_tx_queues ||
	    rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	/* Start from increases, so the error path only does decreases -
	 * decreases can't fail.
	 */
	if (rxq > dev->real_num_rx_queues) {
		err = netif_set_real_num_rx_queues(dev, rxq);
		if (err)
			return err;
	}
	if (txq > dev->real_num_tx_queues) {
		err = netif_set_real_num_tx_queues(dev, txq);
		if (err)
			goto undo_rx;
	}
	if (rxq < dev->real_num_rx_queues)
		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
	if (txq < dev->real_num_tx_queues)
		WARN_ON(netif_set_real_num_tx_queues(dev, txq));

	return 0;
undo_rx:
	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
	return err;
}
EXPORT_SYMBOL(netif_set_real_num_queues);
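
/* Illustrative sketch (not part of this file): a driver's ndo_open might
 * shrink the active queue count to what it actually allocated.  The private
 * structure layout (example_priv) is hypothetical.
 */
#if 0
static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

	/* Both directions in one call; on failure nothing is changed. */
	return netif_set_real_num_queues(dev, priv->num_txq, priv->num_rxq);
}
#endif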
/**
 * netif_set_tso_max_size() - set the max size of TSO frames supported
 * @dev:	netdev to update
 * @size:	max skb->len of a TSO frame
 *
 * Set the limit on the size of TSO super-frames the device can handle.
 * Unless explicitly set the stack will assume the value of
 * %GSO_LEGACY_MAX_SIZE.
 */
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{
	dev->tso_max_size = min(GSO_MAX_SIZE, size);
	if (size < READ_ONCE(dev->gso_max_size))
		netif_set_gso_max_size(dev, size);
	if (size < READ_ONCE(dev->gso_ipv4_max_size))
		netif_set_gso_ipv4_max_size(dev, size);
}
EXPORT_SYMBOL(netif_set_tso_max_size);
/**
 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
 * @dev:	netdev to update
 * @segs:	max number of TCP segments
 *
 * Set the limit on the number of TCP segments the device can generate from
 * a single TSO super-frame.
 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
 */
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
{
	dev->tso_max_segs = segs;
	if (segs < READ_ONCE(dev->gso_max_segs))
		netif_set_gso_max_segs(dev, segs);
}
EXPORT_SYMBOL(netif_set_tso_max_segs);
/**
 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
 * @to:		netdev to update
 * @from:	netdev from which to copy the limits
 */
void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
{
	netif_set_tso_max_size(to, from->tso_max_size);
	netif_set_tso_max_segs(to, from->tso_max_segs);
}
EXPORT_SYMBOL(netif_inherit_tso_max);
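
/* Illustrative sketch (not part of this file): a driver whose DMA engine caps
 * a TSO frame at 64 KB and 32 descriptors would advertise that at probe time.
 * The helper name and the limits are hypothetical.
 */
#if 0
static void example_set_tso_limits(struct net_device *dev)
{
	netif_set_tso_max_size(dev, 64 * 1024);
	netif_set_tso_max_segs(dev, 32);
}
#endif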
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * Default value is the number of physical cores if there are only 1 or 2, or
 * half of them if there are more.
 */
int netif_get_num_default_rss_queues(void)
{
	cpumask_var_t cpus;
	int cpu, count = 0;

	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
		return 1;

	cpumask_copy(cpus, cpu_online_mask);
	for_each_cpu(cpu, cpus) {
		++count;
		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
	}
	free_cpumask_var(cpus);

	return count > 2 ? DIV_ROUND_UP(count, 2) : count;
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
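
/* Illustrative sketch (not part of this file): drivers commonly clamp their
 * hardware queue count to this default when the user has not configured one.
 * The helper name is hypothetical.
 */
#if 0
static unsigned int example_pick_num_rxq(unsigned int hw_max)
{
	return min_t(unsigned int, hw_max, netif_get_num_default_rss_queues());
}
#endif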
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_drop_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!netif_xmit_stopped(txq)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
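
/* Illustrative sketch (not part of this file): the usual stop/wake pattern
 * around a TX ring.  example_ring, example_ring_space() and the thresholds
 * are hypothetical.
 */
#if 0
static void example_tx(struct example_ring *ring, struct netdev_queue *txq)
{
	/* Stop the queue when the ring is nearly full ... */
	if (example_ring_space(ring) < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(txq);
}

static void example_tx_clean(struct example_ring *ring, struct netdev_queue *txq)
{
	/* ... and wake it once completions free enough descriptors, which
	 * reschedules the qdisc via netif_tx_wake_queue() -> __netif_schedule().
	 */
	if (netif_tx_queue_stopped(txq) &&
	    example_ring_space(ring) > 2 * MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}
#endif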
void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dev_kfree_skb_irq_reason);

void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (in_hardirq() || irqs_disabled())
		dev_kfree_skb_irq_reason(skb, reason);
	else
		kfree_skb_reason(skb, reason);
}
EXPORT_SYMBOL(dev_kfree_skb_any_reason);
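
/* Illustrative sketch (not part of this file): completion handlers that may
 * run in hardirq context free skbs with the _any variant, which defers to the
 * softirq completion queue when needed.  The helper name is hypothetical.
 */
#if 0
static void example_tx_complete(struct sk_buff *skb)
{
	/* Safe from hardirq, softirq or process context. */
	dev_kfree_skb_any(skb);
}
#endif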
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
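
/* Illustrative sketch (not part of this file): drivers typically bracket
 * suspend/resume with detach/attach so the stack stops handing them packets
 * while the hardware is down.  The callback names are hypothetical.
 */
#if 0
static int example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	/* ... quiesce and power down the hardware ... */
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	/* ... re-initialize the hardware ... */
	netif_device_attach(dev);
	return 0;
}
#endif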
3240 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
3241 * to be used as a distribution range.
3243 static u16
skb_tx_hash(const struct net_device
*dev
,
3244 const struct net_device
*sb_dev
,
3245 struct sk_buff
*skb
)
3249 u16 qcount
= dev
->real_num_tx_queues
;
3252 u8 tc
= netdev_get_prio_tc_map(dev
, skb
->priority
);
3254 qoffset
= sb_dev
->tc_to_txq
[tc
].offset
;
3255 qcount
= sb_dev
->tc_to_txq
[tc
].count
;
3256 if (unlikely(!qcount
)) {
3257 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3258 sb_dev
->name
, qoffset
, tc
);
3260 qcount
= dev
->real_num_tx_queues
;
3264 if (skb_rx_queue_recorded(skb
)) {
3265 DEBUG_NET_WARN_ON_ONCE(qcount
== 0);
3266 hash
= skb_get_rx_queue(skb
);
3267 if (hash
>= qoffset
)
3269 while (unlikely(hash
>= qcount
))
3271 return hash
+ qoffset
;
3274 return (u16
) reciprocal_scale(skb_get_hash(skb
), qcount
) + qoffset
;
3277 void skb_warn_bad_offload(const struct sk_buff
*skb
)
3279 static const netdev_features_t null_features
;
3280 struct net_device
*dev
= skb
->dev
;
3281 const char *name
= "";
3283 if (!net_ratelimit())
3287 if (dev
->dev
.parent
)
3288 name
= dev_driver_string(dev
->dev
.parent
);
3290 name
= netdev_name(dev
);
3292 skb_dump(KERN_WARNING
, skb
, false);
3293 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3294 name
, dev
? &dev
->features
: &null_features
,
3295 skb
->sk
? &skb
->sk
->sk_route_caps
: &null_features
);
3299 * Invalidate hardware checksum when packet is to be mangled, and
3300 * complete checksum manually on outgoing path.
3302 int skb_checksum_help(struct sk_buff
*skb
)
3305 int ret
= 0, offset
;
3307 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
3308 goto out_set_summed
;
3310 if (unlikely(skb_is_gso(skb
))) {
3311 skb_warn_bad_offload(skb
);
3315 if (!skb_frags_readable(skb
)) {
3319 /* Before computing a checksum, we should make sure no frag could
3320 * be modified by an external entity : checksum could be wrong.
3322 if (skb_has_shared_frag(skb
)) {
3323 ret
= __skb_linearize(skb
);
3328 offset
= skb_checksum_start_offset(skb
);
3330 if (unlikely(offset
>= skb_headlen(skb
))) {
3331 DO_ONCE_LITE(skb_dump
, KERN_ERR
, skb
, false);
3332 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
3333 offset
, skb_headlen(skb
));
3336 csum
= skb_checksum(skb
, offset
, skb
->len
- offset
, 0);
3338 offset
+= skb
->csum_offset
;
3339 if (unlikely(offset
+ sizeof(__sum16
) > skb_headlen(skb
))) {
3340 DO_ONCE_LITE(skb_dump
, KERN_ERR
, skb
, false);
3341 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
3342 offset
+ sizeof(__sum16
), skb_headlen(skb
));
3345 ret
= skb_ensure_writable(skb
, offset
+ sizeof(__sum16
));
3349 *(__sum16
*)(skb
->data
+ offset
) = csum_fold(csum
) ?: CSUM_MANGLED_0
;
3351 skb
->ip_summed
= CHECKSUM_NONE
;
3355 EXPORT_SYMBOL(skb_checksum_help
);
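
/* Illustrative sketch (not part of this file): a driver that cannot offload
 * the checksum for a given packet falls back to skb_checksum_help() before
 * handing the frame to hardware.  example_hw_can_csum() and example_hw_xmit()
 * are hypothetical.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !example_hw_can_csum(skb) &&		/* hypothetical */
	    skb_checksum_help(skb))
		goto drop;

	return example_hw_xmit(skb, dev);		/* hypothetical */
drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
#endif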
3357 int skb_crc32c_csum_help(struct sk_buff
*skb
)
3360 int ret
= 0, offset
, start
;
3362 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
3365 if (unlikely(skb_is_gso(skb
)))
3368 /* Before computing a checksum, we should make sure no frag could
3369 * be modified by an external entity : checksum could be wrong.
3371 if (unlikely(skb_has_shared_frag(skb
))) {
3372 ret
= __skb_linearize(skb
);
3376 start
= skb_checksum_start_offset(skb
);
3377 offset
= start
+ offsetof(struct sctphdr
, checksum
);
3378 if (WARN_ON_ONCE(offset
>= skb_headlen(skb
))) {
3383 ret
= skb_ensure_writable(skb
, offset
+ sizeof(__le32
));
3387 crc32c_csum
= cpu_to_le32(~__skb_checksum(skb
, start
,
3388 skb
->len
- start
, ~(__u32
)0,
3390 *(__le32
*)(skb
->data
+ offset
) = crc32c_csum
;
3391 skb_reset_csum_not_inet(skb
);
3395 EXPORT_SYMBOL(skb_crc32c_csum_help
);
3397 __be16
skb_network_protocol(struct sk_buff
*skb
, int *depth
)
3399 __be16 type
= skb
->protocol
;
3401 /* Tunnel gso handlers can set protocol to ethernet. */
3402 if (type
== htons(ETH_P_TEB
)) {
3405 if (unlikely(!pskb_may_pull(skb
, sizeof(struct ethhdr
))))
3408 eth
= (struct ethhdr
*)skb
->data
;
3409 type
= eth
->h_proto
;
3412 return vlan_get_protocol_and_depth(skb
, type
, depth
);
3416 /* Take action when hardware reception checksum errors are detected. */
3418 static void do_netdev_rx_csum_fault(struct net_device
*dev
, struct sk_buff
*skb
)
3420 netdev_err(dev
, "hw csum failure\n");
3421 skb_dump(KERN_ERR
, skb
, true);
3425 void netdev_rx_csum_fault(struct net_device
*dev
, struct sk_buff
*skb
)
3427 DO_ONCE_LITE(do_netdev_rx_csum_fault
, dev
, skb
);
3429 EXPORT_SYMBOL(netdev_rx_csum_fault
);
3432 /* XXX: check that highmem exists at all on the given machine. */
3433 static int illegal_highdma(struct net_device
*dev
, struct sk_buff
*skb
)
3435 #ifdef CONFIG_HIGHMEM
3438 if (!(dev
->features
& NETIF_F_HIGHDMA
)) {
3439 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3440 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3441 struct page
*page
= skb_frag_page(frag
);
3443 if (page
&& PageHighMem(page
))
3451 /* If MPLS offload request, verify we are testing hardware MPLS features
3452 * instead of standard features for the netdev.
3454 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3455 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
3456 netdev_features_t features
,
3459 if (eth_p_mpls(type
))
3460 features
&= skb
->dev
->mpls_features
;
3465 static netdev_features_t
net_mpls_features(struct sk_buff
*skb
,
3466 netdev_features_t features
,
3473 static netdev_features_t
harmonize_features(struct sk_buff
*skb
,
3474 netdev_features_t features
)
3478 type
= skb_network_protocol(skb
, NULL
);
3479 features
= net_mpls_features(skb
, features
, type
);
3481 if (skb
->ip_summed
!= CHECKSUM_NONE
&&
3482 !can_checksum_protocol(features
, type
)) {
3483 features
&= ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
3485 if (illegal_highdma(skb
->dev
, skb
))
3486 features
&= ~NETIF_F_SG
;
3491 netdev_features_t
passthru_features_check(struct sk_buff
*skb
,
3492 struct net_device
*dev
,
3493 netdev_features_t features
)
3497 EXPORT_SYMBOL(passthru_features_check
);
3499 static netdev_features_t
dflt_features_check(struct sk_buff
*skb
,
3500 struct net_device
*dev
,
3501 netdev_features_t features
)
3503 return vlan_features_check(skb
, features
);
3506 static netdev_features_t
gso_features_check(const struct sk_buff
*skb
,
3507 struct net_device
*dev
,
3508 netdev_features_t features
)
3510 u16 gso_segs
= skb_shinfo(skb
)->gso_segs
;
3512 if (gso_segs
> READ_ONCE(dev
->gso_max_segs
))
3513 return features
& ~NETIF_F_GSO_MASK
;
3515 if (unlikely(skb
->len
>= netif_get_gso_max_size(dev
, skb
)))
3516 return features
& ~NETIF_F_GSO_MASK
;
3518 if (!skb_shinfo(skb
)->gso_type
) {
3519 skb_warn_bad_offload(skb
);
3520 return features
& ~NETIF_F_GSO_MASK
;
3523 /* Support for GSO partial features requires software
3524 * intervention before we can actually process the packets
3525 * so we need to strip support for any partial features now
3526 * and we can pull them back in after we have partially
3527 * segmented the frame.
3529 if (!(skb_shinfo(skb
)->gso_type
& SKB_GSO_PARTIAL
))
3530 features
&= ~dev
->gso_partial_features
;
3532 /* Make sure to clear the IPv4 ID mangling feature if the
3533 * IPv4 header has the potential to be fragmented.
3535 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
3536 struct iphdr
*iph
= skb
->encapsulation
?
3537 inner_ip_hdr(skb
) : ip_hdr(skb
);
3539 if (!(iph
->frag_off
& htons(IP_DF
)))
3540 features
&= ~NETIF_F_TSO_MANGLEID
;
3546 netdev_features_t
netif_skb_features(struct sk_buff
*skb
)
3548 struct net_device
*dev
= skb
->dev
;
3549 netdev_features_t features
= dev
->features
;
3551 if (skb_is_gso(skb
))
3552 features
= gso_features_check(skb
, dev
, features
);
3554 /* If encapsulation offload request, verify we are testing
3555 * hardware encapsulation features instead of standard
3556 * features for the netdev
3558 if (skb
->encapsulation
)
3559 features
&= dev
->hw_enc_features
;
3561 if (skb_vlan_tagged(skb
))
3562 features
= netdev_intersect_features(features
,
3563 dev
->vlan_features
|
3564 NETIF_F_HW_VLAN_CTAG_TX
|
3565 NETIF_F_HW_VLAN_STAG_TX
);
3567 if (dev
->netdev_ops
->ndo_features_check
)
3568 features
&= dev
->netdev_ops
->ndo_features_check(skb
, dev
,
3571 features
&= dflt_features_check(skb
, dev
, features
);
3573 return harmonize_features(skb
, features
);
3575 EXPORT_SYMBOL(netif_skb_features
);
3577 static int xmit_one(struct sk_buff
*skb
, struct net_device
*dev
,
3578 struct netdev_queue
*txq
, bool more
)
3583 if (dev_nit_active(dev
))
3584 dev_queue_xmit_nit(skb
, dev
);
3587 trace_net_dev_start_xmit(skb
, dev
);
3588 rc
= netdev_start_xmit(skb
, dev
, txq
, more
);
3589 trace_net_dev_xmit(skb
, rc
, dev
, len
);
3594 struct sk_buff
*dev_hard_start_xmit(struct sk_buff
*first
, struct net_device
*dev
,
3595 struct netdev_queue
*txq
, int *ret
)
3597 struct sk_buff
*skb
= first
;
3598 int rc
= NETDEV_TX_OK
;
3601 struct sk_buff
*next
= skb
->next
;
3603 skb_mark_not_on_list(skb
);
3604 rc
= xmit_one(skb
, dev
, txq
, next
!= NULL
);
3605 if (unlikely(!dev_xmit_complete(rc
))) {
3611 if (netif_tx_queue_stopped(txq
) && skb
) {
3612 rc
= NETDEV_TX_BUSY
;
3622 static struct sk_buff
*validate_xmit_vlan(struct sk_buff
*skb
,
3623 netdev_features_t features
)
3625 if (skb_vlan_tag_present(skb
) &&
3626 !vlan_hw_offload_capable(features
, skb
->vlan_proto
))
3627 skb
= __vlan_hwaccel_push_inside(skb
);
3631 int skb_csum_hwoffload_help(struct sk_buff
*skb
,
3632 const netdev_features_t features
)
3634 if (unlikely(skb_csum_is_sctp(skb
)))
3635 return !!(features
& NETIF_F_SCTP_CRC
) ? 0 :
3636 skb_crc32c_csum_help(skb
);
3638 if (features
& NETIF_F_HW_CSUM
)
3641 if (features
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
)) {
3642 switch (skb
->csum_offset
) {
3643 case offsetof(struct tcphdr
, check
):
3644 case offsetof(struct udphdr
, check
):
3649 return skb_checksum_help(skb
);
3651 EXPORT_SYMBOL(skb_csum_hwoffload_help
);
3653 static struct sk_buff
*validate_xmit_skb(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3655 netdev_features_t features
;
3657 features
= netif_skb_features(skb
);
3658 skb
= validate_xmit_vlan(skb
, features
);
3662 skb
= sk_validate_xmit_skb(skb
, dev
);
3666 if (netif_needs_gso(skb
, features
)) {
3667 struct sk_buff
*segs
;
3669 segs
= skb_gso_segment(skb
, features
);
3677 if (skb_needs_linearize(skb
, features
) &&
3678 __skb_linearize(skb
))
3681 /* If packet is not checksummed and device does not
3682 * support checksumming for this protocol, complete
3683 * checksumming here.
3685 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
3686 if (skb
->encapsulation
)
3687 skb_set_inner_transport_header(skb
,
3688 skb_checksum_start_offset(skb
));
3690 skb_set_transport_header(skb
,
3691 skb_checksum_start_offset(skb
));
3692 if (skb_csum_hwoffload_help(skb
, features
))
3697 skb
= validate_xmit_xfrm(skb
, features
, again
);
3704 dev_core_stats_tx_dropped_inc(dev
);
3708 struct sk_buff
*validate_xmit_skb_list(struct sk_buff
*skb
, struct net_device
*dev
, bool *again
)
3710 struct sk_buff
*next
, *head
= NULL
, *tail
;
3712 for (; skb
!= NULL
; skb
= next
) {
3714 skb_mark_not_on_list(skb
);
3716 /* in case skb won't be segmented, point to itself */
3719 skb
= validate_xmit_skb(skb
, dev
, again
);
3727 /* If skb was segmented, skb->prev points to
3728 * the last segment. If not, it still contains skb.
3734 EXPORT_SYMBOL_GPL(validate_xmit_skb_list
);
3736 static void qdisc_pkt_len_init(struct sk_buff
*skb
)
3738 const struct skb_shared_info
*shinfo
= skb_shinfo(skb
);
3740 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
3742 /* To get more precise estimation of bytes sent on wire,
3743 * we add to pkt_len the headers size of all segments
3745 if (shinfo
->gso_size
&& skb_transport_header_was_set(skb
)) {
3746 u16 gso_segs
= shinfo
->gso_segs
;
3747 unsigned int hdr_len
;
3749 /* mac layer + network layer */
3750 hdr_len
= skb_transport_offset(skb
);
3752 /* + transport layer */
3753 if (likely(shinfo
->gso_type
& (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
))) {
3754 const struct tcphdr
*th
;
3755 struct tcphdr _tcphdr
;
3757 th
= skb_header_pointer(skb
, hdr_len
,
3758 sizeof(_tcphdr
), &_tcphdr
);
3760 hdr_len
+= __tcp_hdrlen(th
);
3761 } else if (shinfo
->gso_type
& SKB_GSO_UDP_L4
) {
3762 struct udphdr _udphdr
;
3764 if (skb_header_pointer(skb
, hdr_len
,
3765 sizeof(_udphdr
), &_udphdr
))
3766 hdr_len
+= sizeof(struct udphdr
);
3769 if (unlikely(shinfo
->gso_type
& SKB_GSO_DODGY
)) {
3770 int payload
= skb
->len
- hdr_len
;
3772 /* Malicious packet. */
3775 gso_segs
= DIV_ROUND_UP(payload
, shinfo
->gso_size
);
3777 qdisc_skb_cb(skb
)->pkt_len
+= (gso_segs
- 1) * hdr_len
;
3781 static int dev_qdisc_enqueue(struct sk_buff
*skb
, struct Qdisc
*q
,
3782 struct sk_buff
**to_free
,
3783 struct netdev_queue
*txq
)
3787 rc
= q
->enqueue(skb
, q
, to_free
) & NET_XMIT_MASK
;
3788 if (rc
== NET_XMIT_SUCCESS
)
3789 trace_qdisc_enqueue(q
, txq
, skb
);
3793 static inline int __dev_xmit_skb(struct sk_buff
*skb
, struct Qdisc
*q
,
3794 struct net_device
*dev
,
3795 struct netdev_queue
*txq
)
3797 spinlock_t
*root_lock
= qdisc_lock(q
);
3798 struct sk_buff
*to_free
= NULL
;
3802 qdisc_calculate_pkt_len(skb
, q
);
3804 tcf_set_drop_reason(skb
, SKB_DROP_REASON_QDISC_DROP
);
3806 if (q
->flags
& TCQ_F_NOLOCK
) {
3807 if (q
->flags
& TCQ_F_CAN_BYPASS
&& nolock_qdisc_is_empty(q
) &&
3808 qdisc_run_begin(q
)) {
3809 /* Retest nolock_qdisc_is_empty() within the protection
3810 * of q->seqlock to protect from racing with requeuing.
3812 if (unlikely(!nolock_qdisc_is_empty(q
))) {
3813 rc
= dev_qdisc_enqueue(skb
, q
, &to_free
, txq
);
3820 qdisc_bstats_cpu_update(q
, skb
);
3821 if (sch_direct_xmit(skb
, q
, dev
, txq
, NULL
, true) &&
3822 !nolock_qdisc_is_empty(q
))
3826 return NET_XMIT_SUCCESS
;
3829 rc
= dev_qdisc_enqueue(skb
, q
, &to_free
, txq
);
3833 if (unlikely(to_free
))
3834 kfree_skb_list_reason(to_free
,
3835 tcf_get_drop_reason(to_free
));
3839 if (unlikely(READ_ONCE(q
->owner
) == smp_processor_id())) {
3840 kfree_skb_reason(skb
, SKB_DROP_REASON_TC_RECLASSIFY_LOOP
);
3841 return NET_XMIT_DROP
;
3844 * Heuristic to force contended enqueues to serialize on a
3845 * separate lock before trying to get qdisc main lock.
3846 * This permits qdisc->running owner to get the lock more
3847 * often and dequeue packets faster.
3848 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3849 * and then other tasks will only enqueue packets. The packets will be
3850 * sent after the qdisc owner is scheduled again. To prevent this
3851 * scenario the task always serialize on the lock.
3853 contended
= qdisc_is_running(q
) || IS_ENABLED(CONFIG_PREEMPT_RT
);
3854 if (unlikely(contended
))
3855 spin_lock(&q
->busylock
);
3857 spin_lock(root_lock
);
3858 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED
, &q
->state
))) {
3859 __qdisc_drop(skb
, &to_free
);
3861 } else if ((q
->flags
& TCQ_F_CAN_BYPASS
) && !qdisc_qlen(q
) &&
3862 qdisc_run_begin(q
)) {
3864 * This is a work-conserving queue; there are no old skbs
3865 * waiting to be sent out; and the qdisc is not running -
3866 * xmit the skb directly.
3869 qdisc_bstats_update(q
, skb
);
3871 if (sch_direct_xmit(skb
, q
, dev
, txq
, root_lock
, true)) {
3872 if (unlikely(contended
)) {
3873 spin_unlock(&q
->busylock
);
3880 rc
= NET_XMIT_SUCCESS
;
3882 WRITE_ONCE(q
->owner
, smp_processor_id());
3883 rc
= dev_qdisc_enqueue(skb
, q
, &to_free
, txq
);
3884 WRITE_ONCE(q
->owner
, -1);
3885 if (qdisc_run_begin(q
)) {
3886 if (unlikely(contended
)) {
3887 spin_unlock(&q
->busylock
);
3894 spin_unlock(root_lock
);
3895 if (unlikely(to_free
))
3896 kfree_skb_list_reason(to_free
,
3897 tcf_get_drop_reason(to_free
));
3898 if (unlikely(contended
))
3899 spin_unlock(&q
->busylock
);
3903 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3904 static void skb_update_prio(struct sk_buff
*skb
)
3906 const struct netprio_map
*map
;
3907 const struct sock
*sk
;
3908 unsigned int prioidx
;
3912 map
= rcu_dereference_bh(skb
->dev
->priomap
);
3915 sk
= skb_to_full_sk(skb
);
3919 prioidx
= sock_cgroup_prioidx(&sk
->sk_cgrp_data
);
3921 if (prioidx
< map
->priomap_len
)
3922 skb
->priority
= map
->priomap
[prioidx
];
3925 #define skb_update_prio(skb)
3929 * dev_loopback_xmit - loop back @skb
3930 * @net: network namespace this loopback is happening in
3931 * @sk: sk needed to be a netfilter okfn
3932 * @skb: buffer to transmit
3934 int dev_loopback_xmit(struct net
*net
, struct sock
*sk
, struct sk_buff
*skb
)
3936 skb_reset_mac_header(skb
);
3937 __skb_pull(skb
, skb_network_offset(skb
));
3938 skb
->pkt_type
= PACKET_LOOPBACK
;
3939 if (skb
->ip_summed
== CHECKSUM_NONE
)
3940 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3941 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb
));
3946 EXPORT_SYMBOL(dev_loopback_xmit
);
3948 #ifdef CONFIG_NET_EGRESS
3949 static struct netdev_queue
*
3950 netdev_tx_queue_mapping(struct net_device
*dev
, struct sk_buff
*skb
)
3952 int qm
= skb_get_queue_mapping(skb
);
3954 return netdev_get_tx_queue(dev
, netdev_cap_txqueue(dev
, qm
));
3957 #ifndef CONFIG_PREEMPT_RT
3958 static bool netdev_xmit_txqueue_skipped(void)
3960 return __this_cpu_read(softnet_data
.xmit
.skip_txqueue
);
3963 void netdev_xmit_skip_txqueue(bool skip
)
3965 __this_cpu_write(softnet_data
.xmit
.skip_txqueue
, skip
);
3967 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue
);
3970 static bool netdev_xmit_txqueue_skipped(void)
3972 return current
->net_xmit
.skip_txqueue
;
3975 void netdev_xmit_skip_txqueue(bool skip
)
3977 current
->net_xmit
.skip_txqueue
= skip
;
3979 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue
);
3981 #endif /* CONFIG_NET_EGRESS */
3983 #ifdef CONFIG_NET_XGRESS
3984 static int tc_run(struct tcx_entry
*entry
, struct sk_buff
*skb
,
3985 enum skb_drop_reason
*drop_reason
)
3987 int ret
= TC_ACT_UNSPEC
;
3988 #ifdef CONFIG_NET_CLS_ACT
3989 struct mini_Qdisc
*miniq
= rcu_dereference_bh(entry
->miniq
);
3990 struct tcf_result res
;
3995 if (static_branch_unlikely(&tcf_bypass_check_needed_key
)) {
3996 if (tcf_block_bypass_sw(miniq
->block
))
4000 tc_skb_cb(skb
)->mru
= 0;
4001 tc_skb_cb(skb
)->post_ct
= false;
4002 tcf_set_drop_reason(skb
, *drop_reason
);
4004 mini_qdisc_bstats_cpu_update(miniq
, skb
);
4005 ret
= tcf_classify(skb
, miniq
->block
, miniq
->filter_list
, &res
, false);
4006 /* Only tcf related quirks below. */
4009 *drop_reason
= tcf_get_drop_reason(skb
);
4010 mini_qdisc_qstats_cpu_drop(miniq
);
4013 case TC_ACT_RECLASSIFY
:
4014 skb
->tc_index
= TC_H_MIN(res
.classid
);
4017 #endif /* CONFIG_NET_CLS_ACT */
4021 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key
);
4025 static_branch_inc(&tcx_needed_key
);
4030 static_branch_dec(&tcx_needed_key
);
4033 static __always_inline
enum tcx_action_base
4034 tcx_run(const struct bpf_mprog_entry
*entry
, struct sk_buff
*skb
,
4035 const bool needs_mac
)
4037 const struct bpf_mprog_fp
*fp
;
4038 const struct bpf_prog
*prog
;
4042 __skb_push(skb
, skb
->mac_len
);
4043 bpf_mprog_foreach_prog(entry
, fp
, prog
) {
4044 bpf_compute_data_pointers(skb
);
4045 ret
= bpf_prog_run(prog
, skb
);
4046 if (ret
!= TCX_NEXT
)
4050 __skb_pull(skb
, skb
->mac_len
);
4051 return tcx_action_code(skb
, ret
);
4054 static __always_inline
struct sk_buff
*
4055 sch_handle_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
, int *ret
,
4056 struct net_device
*orig_dev
, bool *another
)
4058 struct bpf_mprog_entry
*entry
= rcu_dereference_bh(skb
->dev
->tcx_ingress
);
4059 enum skb_drop_reason drop_reason
= SKB_DROP_REASON_TC_INGRESS
;
4060 struct bpf_net_context __bpf_net_ctx
, *bpf_net_ctx
;
4066 bpf_net_ctx
= bpf_net_ctx_set(&__bpf_net_ctx
);
4068 *ret
= deliver_skb(skb
, *pt_prev
, orig_dev
);
4072 qdisc_skb_cb(skb
)->pkt_len
= skb
->len
;
4073 tcx_set_ingress(skb
, true);
4075 if (static_branch_unlikely(&tcx_needed_key
)) {
4076 sch_ret
= tcx_run(entry
, skb
, true);
4077 if (sch_ret
!= TC_ACT_UNSPEC
)
4078 goto ingress_verdict
;
4080 sch_ret
= tc_run(tcx_entry(entry
), skb
, &drop_reason
);
4083 case TC_ACT_REDIRECT
:
4084 /* skb_mac_header check was done by BPF, so we can safely
4085 * push the L2 header back before redirecting to another
4088 __skb_push(skb
, skb
->mac_len
);
4089 if (skb_do_redirect(skb
) == -EAGAIN
) {
4090 __skb_pull(skb
, skb
->mac_len
);
4094 *ret
= NET_RX_SUCCESS
;
4095 bpf_net_ctx_clear(bpf_net_ctx
);
4098 kfree_skb_reason(skb
, drop_reason
);
4100 bpf_net_ctx_clear(bpf_net_ctx
);
4102 /* used by tc_run */
4108 case TC_ACT_CONSUMED
:
4109 *ret
= NET_RX_SUCCESS
;
4110 bpf_net_ctx_clear(bpf_net_ctx
);
4113 bpf_net_ctx_clear(bpf_net_ctx
);
4118 static __always_inline
struct sk_buff
*
4119 sch_handle_egress(struct sk_buff
*skb
, int *ret
, struct net_device
*dev
)
4121 struct bpf_mprog_entry
*entry
= rcu_dereference_bh(dev
->tcx_egress
);
4122 enum skb_drop_reason drop_reason
= SKB_DROP_REASON_TC_EGRESS
;
4123 struct bpf_net_context __bpf_net_ctx
, *bpf_net_ctx
;
4129 bpf_net_ctx
= bpf_net_ctx_set(&__bpf_net_ctx
);
4131 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
4132 * already set by the caller.
4134 if (static_branch_unlikely(&tcx_needed_key
)) {
4135 sch_ret
= tcx_run(entry
, skb
, false);
4136 if (sch_ret
!= TC_ACT_UNSPEC
)
4137 goto egress_verdict
;
4139 sch_ret
= tc_run(tcx_entry(entry
), skb
, &drop_reason
);
4142 case TC_ACT_REDIRECT
:
4143 /* No need to push/pop skb's mac_header here on egress! */
4144 skb_do_redirect(skb
);
4145 *ret
= NET_XMIT_SUCCESS
;
4146 bpf_net_ctx_clear(bpf_net_ctx
);
4149 kfree_skb_reason(skb
, drop_reason
);
4150 *ret
= NET_XMIT_DROP
;
4151 bpf_net_ctx_clear(bpf_net_ctx
);
4153 /* used by tc_run */
4159 case TC_ACT_CONSUMED
:
4160 *ret
= NET_XMIT_SUCCESS
;
4161 bpf_net_ctx_clear(bpf_net_ctx
);
4164 bpf_net_ctx_clear(bpf_net_ctx
);
4169 static __always_inline
struct sk_buff
*
4170 sch_handle_ingress(struct sk_buff
*skb
, struct packet_type
**pt_prev
, int *ret
,
4171 struct net_device
*orig_dev
, bool *another
)
4176 static __always_inline
struct sk_buff
*
4177 sch_handle_egress(struct sk_buff
*skb
, int *ret
, struct net_device
*dev
)
4181 #endif /* CONFIG_NET_XGRESS */
4184 static int __get_xps_queue_idx(struct net_device
*dev
, struct sk_buff
*skb
,
4185 struct xps_dev_maps
*dev_maps
, unsigned int tci
)
4187 int tc
= netdev_get_prio_tc_map(dev
, skb
->priority
);
4188 struct xps_map
*map
;
4189 int queue_index
= -1;
4191 if (tc
>= dev_maps
->num_tc
|| tci
>= dev_maps
->nr_ids
)
4194 tci
*= dev_maps
->num_tc
;
4197 map
= rcu_dereference(dev_maps
->attr_map
[tci
]);
4200 queue_index
= map
->queues
[0];
4202 queue_index
= map
->queues
[reciprocal_scale(
4203 skb_get_hash(skb
), map
->len
)];
4204 if (unlikely(queue_index
>= dev
->real_num_tx_queues
))
4211 static int get_xps_queue(struct net_device
*dev
, struct net_device
*sb_dev
,
4212 struct sk_buff
*skb
)
4215 struct xps_dev_maps
*dev_maps
;
4216 struct sock
*sk
= skb
->sk
;
4217 int queue_index
= -1;
4219 if (!static_key_false(&xps_needed
))
4223 if (!static_key_false(&xps_rxqs_needed
))
4226 dev_maps
= rcu_dereference(sb_dev
->xps_maps
[XPS_RXQS
]);
4228 int tci
= sk_rx_queue_get(sk
);
4231 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
4236 if (queue_index
< 0) {
4237 dev_maps
= rcu_dereference(sb_dev
->xps_maps
[XPS_CPUS
]);
4239 unsigned int tci
= skb
->sender_cpu
- 1;
4241 queue_index
= __get_xps_queue_idx(dev
, skb
, dev_maps
,
4253 u16
dev_pick_tx_zero(struct net_device
*dev
, struct sk_buff
*skb
,
4254 struct net_device
*sb_dev
)
4258 EXPORT_SYMBOL(dev_pick_tx_zero
);
4260 u16
netdev_pick_tx(struct net_device
*dev
, struct sk_buff
*skb
,
4261 struct net_device
*sb_dev
)
4263 struct sock
*sk
= skb
->sk
;
4264 int queue_index
= sk_tx_queue_get(sk
);
4266 sb_dev
= sb_dev
? : dev
;
4268 if (queue_index
< 0 || skb
->ooo_okay
||
4269 queue_index
>= dev
->real_num_tx_queues
) {
4270 int new_index
= get_xps_queue(dev
, sb_dev
, skb
);
4273 new_index
= skb_tx_hash(dev
, sb_dev
, skb
);
4275 if (queue_index
!= new_index
&& sk
&&
4277 rcu_access_pointer(sk
->sk_dst_cache
))
4278 sk_tx_queue_set(sk
, new_index
);
4280 queue_index
= new_index
;
4285 EXPORT_SYMBOL(netdev_pick_tx
);
4287 struct netdev_queue
*netdev_core_pick_tx(struct net_device
*dev
,
4288 struct sk_buff
*skb
,
4289 struct net_device
*sb_dev
)
4291 int queue_index
= 0;
4294 u32 sender_cpu
= skb
->sender_cpu
- 1;
4296 if (sender_cpu
>= (u32
)NR_CPUS
)
4297 skb
->sender_cpu
= raw_smp_processor_id() + 1;
4300 if (dev
->real_num_tx_queues
!= 1) {
4301 const struct net_device_ops
*ops
= dev
->netdev_ops
;
4303 if (ops
->ndo_select_queue
)
4304 queue_index
= ops
->ndo_select_queue(dev
, skb
, sb_dev
);
4306 queue_index
= netdev_pick_tx(dev
, skb
, sb_dev
);
4308 queue_index
= netdev_cap_txqueue(dev
, queue_index
);
4311 skb_set_queue_mapping(skb
, queue_index
);
4312 return netdev_get_tx_queue(dev
, queue_index
);
4316 * __dev_queue_xmit() - transmit a buffer
4317 * @skb: buffer to transmit
4318 * @sb_dev: suboordinate device used for L2 forwarding offload
4320 * Queue a buffer for transmission to a network device. The caller must
4321 * have set the device and priority and built the buffer before calling
4322 * this function. The function can be called from an interrupt.
4324 * When calling this method, interrupts MUST be enabled. This is because
4325 * the BH enable code must have IRQs enabled so that it will not deadlock.
4327 * Regardless of the return value, the skb is consumed, so it is currently
4328 * difficult to retry a send to this method. (You can bump the ref count
4329 * before sending to hold a reference for retry if you are careful.)
4332 * * 0 - buffer successfully transmitted
4333 * * positive qdisc return code - NET_XMIT_DROP etc.
4334 * * negative errno - other errors
4336 int __dev_queue_xmit(struct sk_buff
*skb
, struct net_device
*sb_dev
)
4338 struct net_device
*dev
= skb
->dev
;
4339 struct netdev_queue
*txq
= NULL
;
4344 skb_reset_mac_header(skb
);
4345 skb_assert_len(skb
);
4347 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_SCHED_TSTAMP
))
4348 __skb_tstamp_tx(skb
, NULL
, NULL
, skb
->sk
, SCM_TSTAMP_SCHED
);
4350 /* Disable soft irqs for various locks below. Also
4351 * stops preemption for RCU.
4355 skb_update_prio(skb
);
4357 qdisc_pkt_len_init(skb
);
4358 tcx_set_ingress(skb
, false);
4359 #ifdef CONFIG_NET_EGRESS
4360 if (static_branch_unlikely(&egress_needed_key
)) {
4361 if (nf_hook_egress_active()) {
4362 skb
= nf_hook_egress(skb
, &rc
, dev
);
4367 netdev_xmit_skip_txqueue(false);
4369 nf_skip_egress(skb
, true);
4370 skb
= sch_handle_egress(skb
, &rc
, dev
);
4373 nf_skip_egress(skb
, false);
4375 if (netdev_xmit_txqueue_skipped())
4376 txq
= netdev_tx_queue_mapping(dev
, skb
);
4379 /* If device/qdisc don't need skb->dst, release it right now while
4380 * its hot in this cpu cache.
4382 if (dev
->priv_flags
& IFF_XMIT_DST_RELEASE
)
4388 txq
= netdev_core_pick_tx(dev
, skb
, sb_dev
);
4390 q
= rcu_dereference_bh(txq
->qdisc
);
4392 trace_net_dev_queue(skb
);
4394 rc
= __dev_xmit_skb(skb
, q
, dev
, txq
);
4398 /* The device has no queue. Common case for software devices:
4399 * loopback, all the sorts of tunnels...
4401 * Really, it is unlikely that netif_tx_lock protection is necessary
4402 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
4404 * However, it is possible, that they rely on protection
4407 * Check this and shot the lock. It is not prone from deadlocks.
4408 *Either shot noqueue qdisc, it is even simpler 8)
4410 if (dev
->flags
& IFF_UP
) {
4411 int cpu
= smp_processor_id(); /* ok because BHs are off */
4413 /* Other cpus might concurrently change txq->xmit_lock_owner
4414 * to -1 or to their cpu id, but not to our id.
4416 if (READ_ONCE(txq
->xmit_lock_owner
) != cpu
) {
4417 if (dev_xmit_recursion())
4418 goto recursion_alert
;
4420 skb
= validate_xmit_skb(skb
, dev
, &again
);
4424 HARD_TX_LOCK(dev
, txq
, cpu
);
4426 if (!netif_xmit_stopped(txq
)) {
4427 dev_xmit_recursion_inc();
4428 skb
= dev_hard_start_xmit(skb
, dev
, txq
, &rc
);
4429 dev_xmit_recursion_dec();
4430 if (dev_xmit_complete(rc
)) {
4431 HARD_TX_UNLOCK(dev
, txq
);
4435 HARD_TX_UNLOCK(dev
, txq
);
4436 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4439 /* Recursion is detected! It is possible,
4443 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4449 rcu_read_unlock_bh();
4451 dev_core_stats_tx_dropped_inc(dev
);
4452 kfree_skb_list(skb
);
4455 rcu_read_unlock_bh();
4458 EXPORT_SYMBOL(__dev_queue_xmit
);
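
/* Illustrative sketch (not part of this file): minimal construction and
 * transmission of a raw Ethernet frame through the path above.  Error
 * handling and header contents are elided; the helper name is hypothetical.
 */
#if 0
static int example_send_raw(struct net_device *dev, const void *frame,
			    unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return -ENOMEM;
	skb_put_data(skb, frame, len);
	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Consumes the skb regardless of the return value. */
	return dev_queue_xmit(skb);
}
#endif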
4460 int __dev_direct_xmit(struct sk_buff
*skb
, u16 queue_id
)
4462 struct net_device
*dev
= skb
->dev
;
4463 struct sk_buff
*orig_skb
= skb
;
4464 struct netdev_queue
*txq
;
4465 int ret
= NETDEV_TX_BUSY
;
4468 if (unlikely(!netif_running(dev
) ||
4469 !netif_carrier_ok(dev
)))
4472 skb
= validate_xmit_skb_list(skb
, dev
, &again
);
4473 if (skb
!= orig_skb
)
4476 skb_set_queue_mapping(skb
, queue_id
);
4477 txq
= skb_get_tx_queue(dev
, skb
);
4481 dev_xmit_recursion_inc();
4482 HARD_TX_LOCK(dev
, txq
, smp_processor_id());
4483 if (!netif_xmit_frozen_or_drv_stopped(txq
))
4484 ret
= netdev_start_xmit(skb
, dev
, txq
, false);
4485 HARD_TX_UNLOCK(dev
, txq
);
4486 dev_xmit_recursion_dec();
4491 dev_core_stats_tx_dropped_inc(dev
);
4492 kfree_skb_list(skb
);
4493 return NET_XMIT_DROP
;
4495 EXPORT_SYMBOL(__dev_direct_xmit
);
4497 /*************************************************************************
4499 *************************************************************************/
4500 static DEFINE_PER_CPU(struct task_struct
*, backlog_napi
);
4502 int weight_p __read_mostly
= 64; /* old backlog weight */
4503 int dev_weight_rx_bias __read_mostly
= 1; /* bias for backlog weight */
4504 int dev_weight_tx_bias __read_mostly
= 1; /* bias for output_queue quota */
4506 /* Called with irq disabled */
4507 static inline void ____napi_schedule(struct softnet_data
*sd
,
4508 struct napi_struct
*napi
)
4510 struct task_struct
*thread
;
4512 lockdep_assert_irqs_disabled();
4514 if (test_bit(NAPI_STATE_THREADED
, &napi
->state
)) {
4515 /* Paired with smp_mb__before_atomic() in
4516 * napi_enable()/dev_set_threaded().
4517 * Use READ_ONCE() to guarantee a complete
4518 * read on napi->thread. Only call
4519 * wake_up_process() when it's not NULL.
4521 thread
= READ_ONCE(napi
->thread
);
4523 if (use_backlog_threads() && thread
== raw_cpu_read(backlog_napi
))
4524 goto use_local_napi
;
4526 set_bit(NAPI_STATE_SCHED_THREADED
, &napi
->state
);
4527 wake_up_process(thread
);
4533 list_add_tail(&napi
->poll_list
, &sd
->poll_list
);
4534 WRITE_ONCE(napi
->list_owner
, smp_processor_id());
4535 /* If not called from net_rx_action()
4536 * we have to raise NET_RX_SOFTIRQ.
4538 if (!sd
->in_net_rx_action
)
4539 __raise_softirq_irqoff(NET_RX_SOFTIRQ
);
4544 struct static_key_false rps_needed __read_mostly
;
4545 EXPORT_SYMBOL(rps_needed
);
4546 struct static_key_false rfs_needed __read_mostly
;
4547 EXPORT_SYMBOL(rfs_needed
);
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
		u32 head;
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u16 rxq_index;
		u32 flow_id;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		WRITE_ONCE(rflow->filter, rc);
		if (old_rflow->filter == rc)
			WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER);
	out:
#endif
		head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
		rps_input_queue_tail_save(&rflow->last_qtail, head);
	}

	WRITE_ONCE(rflow->cpu, next_cpu);
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match.
		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
		 */
		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
		if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & net_hotdata.rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
			    rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
		    ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
			   READ_ONCE(rflow->last_qtail)) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
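/*
 * Illustrative sketch (not part of dev.c): how a driver that implements
 * ndo_rx_flow_steer() might periodically expire stale ARFS filters with
 * rps_may_expire_flow(), as the kernel-doc above recommends. The
 * foo_arfs_entry layout and the (commented-out) foo_remove_hw_filter()
 * helper are assumptions made only for this example.
 */
#ifdef CONFIG_RFS_ACCEL
struct foo_arfs_entry {
	struct hlist_node node;
	u32 flow_id;	/* flow ID that was passed to ndo_rx_flow_steer() */
	u16 rxq_index;	/* RX queue the flow was steered to */
	u16 filter_id;	/* filter ID returned by ndo_rx_flow_steer() */
};

static void __maybe_unused foo_arfs_expire_one(struct net_device *netdev,
					       struct foo_arfs_entry *e)
{
	if (rps_may_expire_flow(netdev, e->rxq_index, e->flow_id,
				e->filter_id)) {
		/* Hardware filter removal is device specific:
		 * foo_remove_hw_filter(netdev, e->filter_id);
		 */
		hlist_del(&e->node);
		kfree(e);
	}
}
#endif /* CONFIG_RFS_ACCEL */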
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */
/* Called from hardirq (IPI) context */
static void trigger_rx_softirq(void *data)
{
	struct softnet_data *sd = data;

	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	smp_store_release(&sd->defer_ipi_scheduled, 0);
}
/*
 * After we queued a packet into sd->input_pkt_queue,
 * we need to make sure this queue is serviced soon.
 *
 * - If this is another cpu queue, link it to our rps_ipi_list,
 *   and make sure we will process rps_ipi_list from net_rx_action().
 *
 * - If this is our own queue, NAPI schedule our backlog.
 *   Note that this also raises NET_RX_SOFTIRQ.
 */
static void napi_schedule_rps(struct softnet_data *sd)
{
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

#ifdef CONFIG_RPS
	if (sd != mysd) {
		if (use_backlog_threads()) {
			__napi_schedule_irqoff(&sd->backlog);
			return;
		}

		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		/* If not called from net_rx_action() or napi_threaded_poll()
		 * we have to raise NET_RX_SOFTIRQ.
		 */
		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return;
	}
#endif /* CONFIG_RPS */
	__napi_schedule_irqoff(&mysd->backlog);
}
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
{
	unsigned long flags;

	if (use_backlog_threads()) {
		backlog_lock_irq_save(sd, &flags);

		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
			__napi_schedule_irqoff(&sd->backlog);

		backlog_unlock_irq_restore(sd, &flags);

	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
		smp_call_function_single_async(cpu, &sd->defer_csd);
	}
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	enum skb_drop_reason reason;
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;
	int max_backlog;
	u32 tail;

	reason = SKB_DROP_REASON_DEV_READY;
	if (!netif_running(skb->dev))
		goto bad_dev;

	reason = SKB_DROP_REASON_CPU_BACKLOG;
	sd = &per_cpu(softnet_data, cpu);

	qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
	max_backlog = READ_ONCE(net_hotdata.max_backlog);
	if (unlikely(qlen > max_backlog))
		goto cpu_backlog_drop;
	backlog_lock_irq_save(sd, &flags);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
		if (!qlen) {
			/* Schedule NAPI for backlog device. We can use
			 * non atomic operation as we own the queue lock.
			 */
			if (!__test_and_set_bit(NAPI_STATE_SCHED,
						&sd->backlog.state))
				napi_schedule_rps(sd);
		}
		__skb_queue_tail(&sd->input_pkt_queue, skb);
		tail = rps_input_queue_tail_incr(sd);
		backlog_unlock_irq_restore(sd, &flags);

		/* save the tail outside of the critical section */
		rps_input_queue_tail_save(qtail, tail);
		return NET_RX_SUCCESS;
	}

	backlog_unlock_irq_restore(sd, &flags);

cpu_backlog_drop:
	atomic_inc(&sd->dropped);
bad_dev:
	dev_core_stats_rx_dropped_inc(skb->dev);
	kfree_skb_reason(skb, reason);
	return NET_RX_DROP;
}
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_rx_queue *rxqueue;

	rxqueue = dev->_rx;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);

			return rxqueue; /* Return first rxqueue */
		}
		rxqueue += index;
	}
	return rxqueue;
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     struct bpf_prog *xdp_prog)
{
	void *orig_data, *orig_data_end, *hard_start;
	struct netdev_rx_queue *rxqueue;
	bool orig_bcast, orig_host;
	u32 mac_len, frame_sz;
	__be16 orig_eth_type;
	struct ethhdr *eth;
	u32 metalen, act;
	int off;

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hard_start = skb->data - skb_headroom(skb);

	/* SKB "head" area always have tailroom for skb_shared_info */
	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	rxqueue = netif_get_rxqueue(skb);
	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
			 skb_headlen(skb) + mac_len, true);
	if (skb_is_nonlinear(skb)) {
		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
		xdp_buff_set_frags_flag(xdp);
	} else {
		xdp_buff_clear_frags_flag(xdp);
	}

	orig_data_end = xdp->data_end;
	orig_data = xdp->data;
	eth = (struct ethhdr *)xdp->data;
	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
	orig_eth_type = eth->h_proto;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	/* check if bpf_xdp_adjust_head was used */
	off = xdp->data - orig_data;
	if (off) {
		if (off > 0)
			__skb_pull(skb, off);
		else if (off < 0)
			__skb_push(skb, -off);

		skb->mac_header += off;
		skb_reset_network_header(skb);
	}

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp->data_end - orig_data_end;
	if (off != 0) {
		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
		skb->len += off; /* positive on grow, negative on shrink */
	}

	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
	 */
	if (xdp_buff_has_frags(xdp))
		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
	else
		skb->data_len = 0;

	/* check if XDP changed eth hdr such SKB needs update */
	eth = (struct ethhdr *)xdp->data;
	if ((orig_eth_type != eth->h_proto) ||
	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
						  skb->dev->dev_addr)) ||
	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
		__skb_push(skb, ETH_HLEN);
		skb->pkt_type = PACKET_HOST;
		skb->protocol = eth_type_trans(skb, skb->dev);
	}

	/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
	 * before calling us again on redirect path. We do not call do_redirect
	 * as we leave that up to the caller.
	 *
	 * Caller is responsible for managing lifetime of skb (i.e. calling
	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
	 */
	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		metalen = xdp->data - xdp->data_meta;
		if (metalen)
			skb_metadata_set(skb, metalen);
		break;
	}

	return act;
}
static int netif_skb_check_for_xdp(struct sk_buff **pskb,
				   struct bpf_prog *prog)
{
	struct sk_buff *skb = *pskb;
	int err, hroom, troom;

	if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
		return 0;

	/* In case we have to go down the path and also linearize,
	 * then lets do the pskb_expand_head() work just once here.
	 */
	hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
	troom = skb->tail + skb->data_len - skb->end;
	err = pskb_expand_head(skb,
			       hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
			       troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
	if (err)
		return err;

	return skb_linearize(skb);
}
static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
				     struct xdp_buff *xdp,
				     struct bpf_prog *xdp_prog)
{
	struct sk_buff *skb = *pskb;
	u32 mac_len, act = XDP_DROP;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_is_redirected(skb))
		return XDP_PASS;

	/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
	 * bytes. This is the guarantee that also native XDP provides,
	 * thus we need to do it here as well.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	__skb_push(skb, mac_len);

	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		if (netif_skb_check_for_xdp(pskb, xdp_prog))
			goto do_drop;
	}

	__skb_pull(*pskb, mac_len);

	act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception((*pskb)->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
	do_drop:
		kfree_skb(*pskb);
		break;
	}

	return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior. This also means
 * that XDP packets are able to starve other packets going through a qdisc,
 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
 * queues, so they do not have this starvation issue.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_core_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_frozen_or_drv_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		dev_core_stats_tx_dropped_inc(dev);
		kfree_skb(skb);
	}
}

static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;

	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;
		int err;

		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
		act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
							      &xdp, xdp_prog);
				if (err)
					goto out_redir;
				break;
			case XDP_TX:
				generic_xdp_tx(*pskb, xdp_prog);
				break;
			}
			bpf_net_ctx_clear(bpf_net_ctx);
			return XDP_DROP;
		}
		bpf_net_ctx_clear(bpf_net_ctx);
	}
	return XDP_PASS;
out_redir:
	bpf_net_ctx_clear(bpf_net_ctx);
	kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);

	trace_netif_rx(skb);

#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
	}
	return ret;
}
/**
 *	__netif_rx	-	Slightly optimized version of netif_rx
 *	@skb: buffer to post
 *
 *	This behaves as netif_rx except that it does not disable bottom halves.
 *	As a result this function may only be invoked from the interrupt context
 *	(either hard or soft interrupt).
 */
int __netif_rx(struct sk_buff *skb)
{
	int ret;

	lockdep_assert_once(hardirq_count() | softirq_count());

	trace_netif_rx_entry(skb);
	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);

	return ret;
}
EXPORT_SYMBOL(__netif_rx);
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process via the backlog NAPI device. It
 *	always succeeds. The buffer may be dropped during processing for
 *	congestion control or by the protocol layers.
 *	The network buffer is passed via the backlog NAPI device. Modern NIC
 *	drivers should use NAPI and GRO.
 *	This function can be used from interrupt and from process context. The
 *	caller from process context must not disable interrupts before invoking
 *	this function.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	bool need_bh_off = !(hardirq_count() | softirq_count());
	int ret;

	if (need_bh_off)
		local_bh_disable();
	trace_netif_rx_entry(skb);
	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);
	if (need_bh_off)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(netif_rx);
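/*
 * Illustrative sketch (not part of dev.c): the classic non-NAPI receive
 * path of a driver handing a freshly built skb to the stack with
 * netif_rx(). The foo_rx_one() name is an assumption for the example.
 */
static void __maybe_unused foo_rx_one(struct net_device *netdev,
				      const void *buf, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (unlikely(!skb)) {
		netdev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, netdev);

	/* Queues to the per-CPU backlog; callable from hard/soft IRQ and,
	 * since netif_rx() disables BHs itself when needed, from process
	 * context as well.
	 */
	netif_rx(skb);
}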
static __latent_entropy void net_tx_action(void)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(refcount_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
				trace_consume_skb(skb, net_tx_action);
			else
				trace_kfree_skb(skb, net_tx_action,
						get_kfree_skb_cb(skb)->reason, NULL);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__napi_kfree_skb(skb,
						 get_kfree_skb_cb(skb)->reason);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		rcu_read_lock();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock = NULL;

			head = head->next_sched;

			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();

			if (!(q->flags & TCQ_F_NOLOCK)) {
				root_lock = qdisc_lock(q);
				spin_lock(root_lock);
			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
						     &q->state))) {
				/* There is a synchronize_net() between
				 * STATE_DEACTIVATED flag being set and
				 * qdisc_reset()/some_qdisc_is_busy() in
				 * dev_deactivate(), so we can safely bail out
				 * early here to avoid data race between
				 * qdisc_deactivate() and some_qdisc_is_busy()
				 * for lockless qdisc.
				 */
				clear_bit(__QDISC_STATE_SCHED, &q->state);
				continue;
			}

			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			if (root_lock)
				spin_unlock(root_lock);
		}

		rcu_read_unlock();
	}

	xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
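/*
 * Illustrative sketch (not part of dev.c): how a bonding/bridge-like upper
 * device could attach an rx_handler to a lower device, redirecting frames
 * to itself. The foo_handle_frame()/foo_port_attach() names are assumptions
 * made only for this example.
 */
static rx_handler_result_t __maybe_unused foo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	/* rx_handler_data registered below is visible under RCU here */
	struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

	skb->dev = upper;
	return RX_HANDLER_ANOTHER;	/* re-run __netif_receive_skb_core() */
}

static int __maybe_unused foo_port_attach(struct net_device *lower,
					  struct net_device *upper)
{
	ASSERT_RTNL();
	/* Fails with -EBUSY if another handler is already installed */
	return netdev_rx_handler_register(lower, foo_handle_frame, upper);
}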
/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
				    struct packet_type **ppt_prev)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct sk_buff *skb = *pskb;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		int ret2;

		migrate_disable();
		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
				      &skb);
		migrate_enable();

		if (ret2 != XDP_PASS) {
			ret = NET_RX_DROP;
			goto out;
		}
	}

	if (eth_type_vlan(skb->protocol)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_branch_unlikely(&ingress_needed_key)) {
		bool another = false;

		nf_skip_egress(skb, true);
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
					 &another);
		if (another)
			goto another_round;
		if (!skb)
			goto out;

		nf_skip_egress(skb, false);
		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_redirect(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			break;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
check_vlan_id:
		if (skb_vlan_tag_get_id(skb)) {
			/* Vlan id is non 0 and vlan_do_receive() above couldn't
			 * find vlan device.
			 */
			skb->pkt_type = PACKET_OTHERHOST;
		} else if (eth_type_vlan(skb->protocol)) {
			/* Outer header is 802.1P with vlan 0, inner header is
			 * 802.1Q or 802.1AD and vlan_do_receive() above could
			 * not find vlan dev for vlan id 0.
			 */
			__vlan_hwaccel_clear_tag(skb);
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				goto out;
			if (vlan_do_receive(&skb))
				/* After stripping off 802.1P header with vlan 0
				 * vlan dev is found for inner header.
				 */
				goto another_round;
			else if (unlikely(!skb))
				goto out;
			else
				/* We have stripped outer 802.1P vlan 0 header.
				 * But could not find vlan dev.
				 * check again for vlan id to set OTHERHOST.
				 */
				goto check_vlan_id;
		}
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		__vlan_hwaccel_clear_tag(skb);
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		*ppt_prev = pt_prev;
	} else {
drop:
		if (!deliver_exact)
			dev_core_stats_rx_dropped_inc(skb->dev);
		else
			dev_core_stats_rx_nohandler_inc(skb->dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	/* The invariant here is that if *ppt_prev is not NULL
	 * then skb should also be non-NULL.
	 *
	 * Apparently *ppt_prev assignment above holds this invariant due to
	 * skb dereferencing near it.
	 */
	*pskb = skb;
	return ret;
}
static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct net_device *orig_dev = skb->dev;
	struct packet_type *pt_prev = NULL;
	int ret;

	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
	if (pt_prev)
		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
					 skb->dev, pt_prev, orig_dev);
	return ret;
}
/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb(). It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb_core(struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __netif_receive_skb_one_core(skb, false);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);
static inline void __netif_receive_skb_list_ptype(struct list_head *head,
						  struct packet_type *pt_prev,
						  struct net_device *orig_dev)
{
	struct sk_buff *skb, *next;

	if (!pt_prev)
		return;
	if (list_empty(head))
		return;
	if (pt_prev->list_func != NULL)
		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
				   ip_list_rcv, head, pt_prev, orig_dev);
	else
		list_for_each_entry_safe(skb, next, head, list) {
			skb_list_del_init(skb);
			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
		}
}
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{
	/* Fast-path assumptions:
	 * - There is no RX handler.
	 * - Only one packet_type matches.
	 * If either of these fails, we will end up doing some per-packet
	 * processing in-line, then handling the 'last ptype' for the whole
	 * sublist.  This can't cause out-of-order delivery to any single ptype,
	 * because the 'last ptype' must be constant across the sublist, and all
	 * other ptypes are handled per-packet.
	 */
	/* Current (common) ptype of sublist */
	struct packet_type *pt_curr = NULL;
	/* Current (common) orig_dev of sublist */
	struct net_device *od_curr = NULL;
	struct sk_buff *skb, *next;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *orig_dev = skb->dev;
		struct packet_type *pt_prev = NULL;

		skb_list_del_init(skb);
		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
		if (!pt_prev)
			continue;
		if (pt_curr != pt_prev || od_curr != orig_dev) {
			/* dispatch old sublist */
			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			pt_curr = pt_prev;
			od_curr = orig_dev;
		}
		list_add_tail(&skb->list, &sublist);
	}

	/* dispatch final sublist */
	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_one_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_one_core(skb, false);

	return ret;
}
static void __netif_receive_skb_list(struct list_head *head)
{
	unsigned long noreclaim_flag = 0;
	struct sk_buff *skb, *next;
	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */

	list_for_each_entry_safe(skb, next, head, list) {
		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
			struct list_head sublist;

			/* Handle the previous sublist */
			list_cut_before(&sublist, head, &skb->list);
			if (!list_empty(&sublist))
				__netif_receive_skb_list_core(&sublist, pfmemalloc);
			pfmemalloc = !pfmemalloc;
			/* See comments in __netif_receive_skb */
			if (pfmemalloc)
				noreclaim_flag = memalloc_noreclaim_save();
			else
				memalloc_noreclaim_restore(noreclaim_flag);
		}
	}
	/* Handle the remaining sublist */
	if (!list_empty(head))
		__netif_receive_skb_list_core(head, pfmemalloc);
	/* Restore pflags */
	if (pfmemalloc)
		memalloc_noreclaim_restore(noreclaim_flag);
}
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_branch_dec(&generic_xdp_needed_key);
		} else if (new && !old) {
			static_branch_inc(&generic_xdp_needed_key);
			dev_disable_lro(dev);
			dev_disable_gro_hw(dev);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}
void netif_receive_skb_list_internal(struct list_head *head)
{
	struct sk_buff *skb, *next;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
				    skb);
		skb_list_del_init(skb);
		if (!skb_defer_rx_timestamp(skb))
			list_add_tail(&skb->list, &sublist);
	}
	list_splice_init(&sublist, head);

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		list_for_each_entry_safe(skb, next, head, list) {
			struct rps_dev_flow voidflow, *rflow = &voidflow;
			int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			if (cpu >= 0) {
				/* Will be handled, remove from list */
				skb_list_del_init(skb);
				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			}
		}
	}
#endif
	__netif_receive_skb_list(head);
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
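/*
 * Illustrative sketch (not part of dev.c): a virtual/tunnel device handing
 * a decapsulated frame up the stack with netif_receive_skb() from softirq
 * context. foo_tunnel_rx() is an assumption made only for this example.
 */
static void __maybe_unused foo_tunnel_rx(struct net_device *tunnel_dev,
					 struct sk_buff *skb)
{
	skb->dev = tunnel_dev;
	skb->protocol = eth_type_trans(skb, tunnel_dev);
	netif_receive_skb(skb);	/* softirq context, interrupts enabled */
}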
/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
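/*
 * Illustrative sketch (not part of dev.c): batching completed frames and
 * submitting them in one go with netif_receive_skb_list(), which lets the
 * core group them into per-ptype sublists. foo_rx_batch() is an assumption
 * made only for this example.
 */
static void __maybe_unused foo_rx_batch(struct sk_buff **skbs, int n)
{
	LIST_HEAD(list);
	int i;

	for (i = 0; i < n; i++)
		list_add_tail(&skbs[i]->list, &list);

	netif_receive_skb_list(&list);	/* consumes every skb on the list */
}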
static DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	backlog_lock_irq_disable(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			dev_kfree_skb_irq(skb);
			rps_input_queue_head_incr(sd);
		}
	}
	backlog_unlock_irq_enable(sd);

	local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			rps_input_queue_head_incr(sd);
		}
	}
	local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
	local_bh_enable();
}
static bool flush_required(int cpu)
{
#if IS_ENABLED(CONFIG_RPS)
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	bool do_flush;

	backlog_lock_irq_disable(sd);

	/* as insertion into process_queue happens with the rps lock held,
	 * process_queue access may race only with dequeue
	 */
	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
		   !skb_queue_empty_lockless(&sd->process_queue);
	backlog_unlock_irq_enable(sd);

	return do_flush;
#endif
	/* without RPS we can't safely check input_pkt_queue: during a
	 * concurrent remote skb_queue_splice() we can detect as empty both
	 * input_pkt_queue and process_queue even if the latter could end-up
	 * containing a lot of packets.
	 */
	return true;
}
static void flush_all_backlogs(void)
{
	static cpumask_t flush_cpus;
	unsigned int cpu;

	/* since we are under rtnl lock protection we can use static data
	 * for the cpumask and avoid allocating on stack the possibly
	 * large mask
	 */
	ASSERT_RTNL();

	cpus_read_lock();

	cpumask_clear(&flush_cpus);
	for_each_online_cpu(cpu) {
		if (flush_required(cpu)) {
			queue_work_on(cpu, system_highpri_wq,
				      per_cpu_ptr(&flush_works, cpu));
			cpumask_set_cpu(cpu, &flush_cpus);
		}
	}

	/* we can have in flight packet[s] on the cpus we are not flushing,
	 * synchronize_net() in unregister_netdevice_many() will take care of
	 * them
	 */
	for_each_cpu(cpu, &flush_cpus)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	cpus_read_unlock();
}
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (!use_backlog_threads() && remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return !use_backlog_threads() && sd->rps_ipi_list;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, it's better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
	while (again) {
		struct sk_buff *skb;

		local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			if (++work >= quota) {
				rps_input_queue_head_add(sd, work);
				return work;
			}

			local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
		}
		local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);

		backlog_lock_irq_disable(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state &= NAPIF_STATE_THREADED;
			again = false;
		} else {
			local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
			local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
		}
		backlog_unlock_irq_enable(sd);
	}

	rps_input_queue_head_add(sd, work);
	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
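/*
 * Illustrative sketch (not part of dev.c): the usual driver hard-IRQ
 * handler pattern - mask device RX interrupts, then hand polling over to
 * NAPI via the napi_schedule_prep()/__napi_schedule() pair defined here.
 * foo_irq_ctx and foo_isr() are assumptions made only for this example.
 */
struct foo_irq_ctx {
	struct napi_struct napi;
};

static irqreturn_t __maybe_unused foo_isr(int irq, void *data)
{
	struct foo_irq_ctx *ctx = data;

	if (napi_schedule_prep(&ctx->napi)) {
		/* mask further device RX interrupts here (device specific) */
		__napi_schedule(&ctx->napi);
	}
	return IRQ_HANDLED;
}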
/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	do {
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
 * because the interrupt disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
	else
		__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new, timeout = 0;
	bool ret = true;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (work_done) {
		if (n->gro_bitmask)
			timeout = READ_ONCE(n->dev->gro_flush_timeout);
		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
	}
	if (n->defer_hard_irqs_count > 0) {
		n->defer_hard_irqs_count--;
		timeout = READ_ONCE(n->dev->gro_flush_timeout);
		if (timeout)
			ret = false;
	}
	if (n->gro_bitmask) {
		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to bound somehow the time packets are kept in
		 * the GRO layer.
		 */
		napi_gro_flush(n, !!timeout);
	}

	gro_normal_list(n);

	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}
	WRITE_ONCE(n->list_owner, -1);

	val = READ_ONCE(n->state);
	do {
		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
			      NAPIF_STATE_SCHED_THREADED |
			      NAPIF_STATE_PREFER_BUSY_POLL);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (!try_cmpxchg(&n->state, &val, new));

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	if (timeout)
		hrtimer_start(&n->timer, ns_to_ktime(timeout),
			      HRTIMER_MODE_REL_PINNED);
	return ret;
}
EXPORT_SYMBOL(napi_complete_done);
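/*
 * Illustrative sketch (not part of dev.c): the canonical poll() shape
 * around napi_complete_done(). Device interrupts are only re-armed when
 * napi_complete_done() returns true; a false return means the core has
 * decided to keep polling (busy poll or gro_flush_timeout deferral).
 * foo_rx_clean()/foo_enable_irqs() are assumptions, shown only as comments.
 */
static int __maybe_unused foo_poll_done(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* work = foo_rx_clean(napi, budget); -- device specific RX cleanup */

	if (work < budget) {
		if (napi_complete_done(napi, work)) {
			/* foo_enable_irqs(napi); -- re-arm device interrupts */
		}
	}
	return work;
}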
/* must be called under rcu_read_lock(), as we dont take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
static void skb_defer_free_flush(struct softnet_data *sd)
{
	struct sk_buff *skb, *next;

	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
	if (!READ_ONCE(sd->defer_list))
		return;

	spin_lock(&sd->defer_lock);
	skb = sd->defer_list;
	sd->defer_list = NULL;
	sd->defer_count = 0;
	spin_unlock(&sd->defer_lock);

	while (skb != NULL) {
		next = skb->next;
		napi_consume_skb(skb, 1);
		skb = next;
	}
}
#if defined(CONFIG_NET_RX_BUSY_POLL)

static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
{
	if (!skip_schedule) {
		gro_normal_list(napi);
		__napi_schedule(napi);
		return;
	}

	if (napi->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(napi, HZ >= 1000);
	}

	gro_normal_list(napi);
	clear_bit(NAPI_STATE_SCHED, &napi->state);
}

enum {
	NAPI_F_PREFER_BUSY_POLL	= 1,
	NAPI_F_END_ON_RESCHED	= 2,
};
static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
			   unsigned flags, u16 budget)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	bool skip_schedule = false;
	unsigned long timeout;
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

	if (flags & NAPI_F_PREFER_BUSY_POLL) {
		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
		if (napi->defer_hard_irqs_count && timeout) {
			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
			skip_schedule = true;
		}
	}

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, budget);
	/* We can't gro_normal_list() here, because napi->poll() might have
	 * rearmed the napi (napi_complete_done()) in which case it could
	 * already be running on another CPU.
	 */
	trace_napi_poll(napi, rc, budget);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == budget)
		__busy_poll_stop(napi, skip_schedule);
	bpf_net_ctx_clear(bpf_net_ctx);
	local_bh_enable();
}
static void __napi_busy_loop(unsigned int napi_id,
			     bool (*loop_end)(void *, unsigned long),
			     void *loop_end_arg, unsigned flags, u16 budget)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

restart:
	napi_poll = NULL;

	napi = napi_by_id(napi_id);
	if (!napi)
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();

	for (;;) {
		int work = 0;

		local_bh_disable();
		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL)) {
				if (flags & NAPI_F_PREFER_BUSY_POLL)
					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
				goto count;
			}
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val) {
				if (flags & NAPI_F_PREFER_BUSY_POLL)
					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
				goto count;
			}
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, budget);
		trace_napi_poll(napi, work, budget);
		gro_normal_list(napi);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		skb_defer_free_flush(this_cpu_ptr(&softnet_data));
		bpf_net_ctx_clear(bpf_net_ctx);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (flags & NAPI_F_END_ON_RESCHED)
				break;
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock, flags, budget);
			if (!IS_ENABLED(CONFIG_PREEMPT_RT))
				preempt_enable();
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock, flags, budget);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}
void napi_busy_loop_rcu(unsigned int napi_id,
			bool (*loop_end)(void *, unsigned long),
			void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{
	unsigned flags = NAPI_F_END_ON_RESCHED;

	if (prefer_busy_poll)
		flags |= NAPI_F_PREFER_BUSY_POLL;

	__napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{
	unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;

	rcu_read_lock();
	__napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
static void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	hlist_del_init_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (!napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
		__napi_schedule_irqoff(napi);
	}

	return HRTIMER_NORESTART;
}
static void init_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		INIT_LIST_HEAD(&napi->gro_hash[i].list);
		napi->gro_hash[i].count = 0;
	}
	napi->gro_bitmask = 0;
}
int dev_set_threaded(struct net_device *dev, bool threaded)
{
	struct napi_struct *napi;
	int err = 0;

	if (dev->threaded == threaded)
		return 0;

	if (threaded) {
		list_for_each_entry(napi, &dev->napi_list, dev_list) {
			if (!napi->thread) {
				err = napi_kthread_create(napi);
				if (err) {
					threaded = false;
					break;
				}
			}
		}
	}

	WRITE_ONCE(dev->threaded, threaded);

	/* Make sure kthread is created before THREADED bit
	 * is set.
	 */
	smp_mb__before_atomic();

	/* Setting/unsetting threaded mode on a napi might not immediately
	 * take effect, if the current napi instance is actively being
	 * polled. In this case, the switch between threaded mode and
	 * softirq mode will happen in the next round of napi_schedule().
	 * This should not cause hiccups/stalls to the live traffic.
	 */
	list_for_each_entry(napi, &dev->napi_list, dev_list)
		assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);

	return err;
}
EXPORT_SYMBOL(dev_set_threaded);
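/*
 * Illustrative sketch (not part of dev.c): switching a device to threaded
 * NAPI from driver or management code. Userspace normally does the same
 * through /sys/class/net/<dev>/threaded, whose handler runs under RTNL;
 * the example takes RTNL for the same reason. foo_enable_threaded_napi()
 * is an assumption made only for this example.
 */
static int __maybe_unused foo_enable_threaded_napi(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_threaded(dev, true);	/* spawns the napi kthreads */
	rtnl_unlock();

	return err;
}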
/**
 * netif_queue_set_napi - Associate queue with the napi
 * @dev: device to which NAPI and queue belong
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 * @napi: NAPI context, pass NULL to clear previously set NAPI
 *
 * Set queue with its corresponding napi context. This should be done after
 * registering the NAPI handler for the queue-vector and the queues have been
 * mapped to the corresponding interrupt vector.
 */
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
			  enum netdev_queue_type type, struct napi_struct *napi)
{
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;

	if (WARN_ON_ONCE(napi && !napi->dev))
		return;
	if (dev->reg_state >= NETREG_REGISTERED)
		ASSERT_RTNL();

	switch (type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(dev, queue_index);
		rxq->napi = napi;
		return;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(dev, queue_index);
		txq->napi = napi;
		return;
	default:
		return;
	}
}
EXPORT_SYMBOL(netif_queue_set_napi);
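/*
 * Illustrative sketch (not part of dev.c): linking a combined RX/TX queue
 * pair to its NAPI instance after the driver has mapped queue vectors, so
 * the netlink queue API can report the association. The foo_vec layout is
 * an assumption made only for this example.
 */
struct foo_vec {
	struct napi_struct napi;
	unsigned int qid;	/* combined RX/TX queue index */
};

static void __maybe_unused foo_map_queue_napi(struct net_device *dev,
					      struct foo_vec *vec)
{
	netif_queue_set_napi(dev, vec->qid, NETDEV_QUEUE_TYPE_RX, &vec->napi);
	netif_queue_set_napi(dev, vec->qid, NETDEV_QUEUE_TYPE_TX, &vec->napi);
}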
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
			   int (*poll)(struct napi_struct *, int), int weight)
{
	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
		return;

	INIT_LIST_HEAD(&napi->poll_list);
	INIT_HLIST_NODE(&napi->napi_hash_node);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	init_gro_hash(napi);
	napi->skb = NULL;
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
				weight);
	napi->weight = weight;
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	napi->list_owner = -1;
	set_bit(NAPI_STATE_SCHED, &napi->state);
	set_bit(NAPI_STATE_NPSVC, &napi->state);
	list_add_rcu(&napi->dev_list, &dev->napi_list);
	napi_hash_add(napi);
	napi_get_frags_check(napi);
	/* Create kthread for this napi if dev->threaded is set.
	 * Clear dev->threaded if kthread creation failed so that
	 * threaded mode will not be enabled in napi_enable().
	 */
	if (dev->threaded && napi_kthread_create(napi))
		dev->threaded = false;
	netif_napi_set_irq(napi, -1);
}
EXPORT_SYMBOL(netif_napi_add_weight);
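/*
 * Illustrative sketch (not part of dev.c): the usual NAPI registration
 * sequence in a driver - add the instance with the default weight, then
 * enable it once the device can generate interrupts. foo_priv,
 * foo_example_poll() and foo_setup_napi() are assumptions made only for
 * this example.
 */
struct foo_priv {
	struct napi_struct napi;
	struct net_device *netdev;
};

static int foo_example_poll(struct napi_struct *napi, int budget)
{
	/* device specific RX/TX completion work would go here */
	napi_complete_done(napi, 0);
	return 0;
}

static void __maybe_unused foo_setup_napi(struct foo_priv *priv)
{
	/* netif_napi_add() wraps netif_napi_add_weight() with
	 * NAPI_POLL_WEIGHT (64).
	 */
	netif_napi_add(priv->netdev, &priv->napi, foo_example_poll);
	napi_enable(&priv->napi);
}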
void napi_disable(struct napi_struct *n)
{
	unsigned long val, new;

	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	val = READ_ONCE(n->state);
	do {
		while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
			usleep_range(20, 200);
			val = READ_ONCE(n->state);
		}

		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
	} while (!try_cmpxchg(&n->state, &val, new));

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);
/**
 * napi_enable - enable NAPI scheduling
 * @n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
void napi_enable(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	do {
		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));

		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
		if (n->dev->threaded && n->thread)
			new |= NAPIF_STATE_THREADED;
	} while (!try_cmpxchg(&n->state, &val, new));
}
EXPORT_SYMBOL(napi_enable);
static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}

/* Must be called in process context */
void __netif_napi_del(struct napi_struct *napi)
{
	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
		return;

	napi_hash_del(napi);
	list_del_rcu(&napi->dev_list);
	napi_free_frags(napi);

	flush_gro_hash(napi);
	napi->gro_bitmask = 0;

	if (napi->thread) {
		kthread_stop(napi->thread);
		napi->thread = NULL;
	}
}
EXPORT_SYMBOL(__netif_napi_del);
static int __napi_poll(struct napi_struct *n, bool *repoll)
{
	int work, weight;

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (napi_is_scheduled(n)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);

		xdp_do_check_flushed(n);
	}

	if (unlikely(work > weight))
		netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
				n->poll, work, weight);

	if (likely(work < weight))
		return work;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		return work;
	}

	/* The NAPI context has more processing work, but busy-polling
	 * is preferred. Exit early.
	 */
	if (napi_prefer_busy_poll(n)) {
		if (napi_complete_done(n, work)) {
			/* If timeout is not set, we need to make sure
			 * that the NAPI is re-scheduled.
			 */
			napi_schedule(n);
		}
		return work;
	}

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	gro_normal_list(n);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		return work;
	}

	*repoll = true;

	return work;
}
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	bool do_repoll = false;
	void *have;
	int work;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	work = __napi_poll(n, &do_repoll);

	if (do_repoll)
		list_add_tail(&n->poll_list, repoll);

	netpoll_poll_unlock(have);

	return work;
}
static int napi_thread_wait(struct napi_struct *napi)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		/* Testing SCHED_THREADED bit here to make sure the current
		 * kthread owns this napi and could poll on this napi.
		 * Testing SCHED bit is not enough because SCHED bit might be
		 * set by some other busy poll thread or by napi_disable().
		 */
		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
			WARN_ON(!list_empty(&napi->poll_list));
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return -1;
}
static void napi_threaded_poll_loop(struct napi_struct *napi)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	struct softnet_data *sd;
	unsigned long last_qs = jiffies;

	for (;;) {
		bool repoll = false;
		void *have;

		local_bh_disable();
		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

		sd = this_cpu_ptr(&softnet_data);
		sd->in_napi_threaded_poll = true;

		have = netpoll_poll_lock(napi);
		__napi_poll(napi, &repoll);
		netpoll_poll_unlock(have);

		sd->in_napi_threaded_poll = false;
		barrier();

		if (sd_has_rps_ipi_waiting(sd)) {
			local_irq_disable();
			net_rps_action_and_irq_enable(sd);
		}
		skb_defer_free_flush(sd);
		bpf_net_ctx_clear(bpf_net_ctx);
		local_bh_enable();

		if (!repoll)
			break;

		rcu_softirq_qs_periodic(last_qs);
		cond_resched();
	}
}

static int napi_threaded_poll(void *data)
{
	struct napi_struct *napi = data;

	while (!napi_thread_wait(napi))
		napi_threaded_poll_loop(napi);

	return 0;
}
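/*
 * Illustrative sketch, not part of this file: napi_threaded_poll() above is
 * the body of the per-NAPI kthread used when threaded NAPI is enabled. A
 * driver (or an admin via /sys/class/net/<dev>/threaded) can opt in with
 * dev_set_threaded(); the foo_* names below are assumptions.
 */
#if 0
static int foo_setup_rx(struct net_device *netdev, struct napi_struct *napi)
{
	netif_napi_add(netdev, napi, foo_napi_poll);

	/* Move polling from the NET_RX softirq into per-NAPI kthreads. */
	return dev_set_threaded(netdev, true);
}
#endif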
static __latent_entropy void net_rx_action(void)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	int budget = READ_ONCE(net_hotdata.netdev_budget);
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
start:
	sd->in_net_rx_action = true;
	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		skb_defer_free_flush(sd);

		if (list_empty(&list)) {
			if (list_empty(&repoll)) {
				sd->in_net_rx_action = false;
				barrier();
				/* We need to check if ____napi_schedule()
				 * had refilled poll_list while
				 * sd->in_net_rx_action was true.
				 */
				if (!list_empty(&sd->poll_list))
					goto start;
				if (!sd_has_rps_ipi_waiting(sd))
					goto end;
			}
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	else
		sd->in_net_rx_action = false;

	net_rps_action_and_irq_enable(sd);
end:
	bpf_net_ctx_clear(bpf_net_ctx);
}
struct netdev_adjacent {
	struct net_device *dev;
	netdevice_tracker dev_tracker;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* lookup ignore flag */
	bool ignore;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};
static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int ____netdev_has_upper_dev(struct net_device *upper_dev,
				    struct netdev_nested_priv *priv)
{
	struct net_device *dev = (struct net_device *)priv->data;

	return upper_dev == dev;
}
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					     &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
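/*
 * Illustrative sketch, not part of this file: a typical RTNL-protected check
 * with netdev_has_upper_dev() before stacking two devices, as a hypothetical
 * foo driver might do in its enslave path. foo_* names are assumptions.
 */
#if 0
static int foo_can_enslave(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();

	/* Refuse if @master is already an immediate upper device of @slave. */
	if (netdev_has_upper_dev(slave, master))
		return -EBUSY;

	return 0;
}
#endif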
/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					       &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master) && !upper->ignore)
		return upper->dev;
	return NULL;
}
/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *upper;

	upper = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;
	*ignore = upper->ignore;

	return upper->dev;
}
static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
static int __netdev_walk_all_upper_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];

	iter = &dev->adj_list.upper;

	ret = fn(now, priv);

	udev = __netdev_next_upper_dev(now, &iter, &ignore);

	niter = &udev->adj_list.upper;
	dev_stack[cur] = now;
	iter_stack[cur++] = iter;

	next = dev_stack[--cur];
	niter = iter_stack[cur];

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];

	iter = &dev->adj_list.upper;

	ret = fn(now, priv);

	udev = netdev_next_upper_dev_rcu(now, &iter);

	niter = &udev->adj_list.upper;
	dev_stack[cur] = now;
	iter_stack[cur++] = iter;

	next = dev_stack[--cur];
	niter = iter_stack[cur];

EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);

static bool __netdev_has_upper_dev(struct net_device *dev,
				   struct net_device *upper_dev)
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,

	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
/*
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)

	*iter = lower->list.next;

	return lower->private;
EXPORT_SYMBOL(netdev_lower_get_next_private);

/*
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)

	*iter = &lower->list;

	return lower->private;
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/*
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)

	*iter = lower->list.next;

EXPORT_SYMBOL(netdev_lower_get_next);
7409 static struct net_device
*netdev_next_lower_dev(struct net_device
*dev
,
7410 struct list_head
**iter
)
7412 struct netdev_adjacent
*lower
;
7414 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7416 if (&lower
->list
== &dev
->adj_list
.lower
)
7419 *iter
= &lower
->list
;
7424 static struct net_device
*__netdev_next_lower_dev(struct net_device
*dev
,
7425 struct list_head
**iter
,
7428 struct netdev_adjacent
*lower
;
7430 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
7432 if (&lower
->list
== &dev
->adj_list
.lower
)
7435 *iter
= &lower
->list
;
7436 *ignore
= lower
->ignore
;
7441 int netdev_walk_all_lower_dev(struct net_device
*dev
,
7442 int (*fn
)(struct net_device
*dev
,
7443 struct netdev_nested_priv
*priv
),
7444 struct netdev_nested_priv
*priv
)
7446 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7447 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7451 iter
= &dev
->adj_list
.lower
;
7455 ret
= fn(now
, priv
);
7462 ldev
= netdev_next_lower_dev(now
, &iter
);
7467 niter
= &ldev
->adj_list
.lower
;
7468 dev_stack
[cur
] = now
;
7469 iter_stack
[cur
++] = iter
;
7476 next
= dev_stack
[--cur
];
7477 niter
= iter_stack
[cur
];
7486 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev
);
7488 static int __netdev_walk_all_lower_dev(struct net_device
*dev
,
7489 int (*fn
)(struct net_device
*dev
,
7490 struct netdev_nested_priv
*priv
),
7491 struct netdev_nested_priv
*priv
)
7493 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7494 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7499 iter
= &dev
->adj_list
.lower
;
7503 ret
= fn(now
, priv
);
7510 ldev
= __netdev_next_lower_dev(now
, &iter
, &ignore
);
7517 niter
= &ldev
->adj_list
.lower
;
7518 dev_stack
[cur
] = now
;
7519 iter_stack
[cur
++] = iter
;
7526 next
= dev_stack
[--cur
];
7527 niter
= iter_stack
[cur
];
7537 struct net_device
*netdev_next_lower_dev_rcu(struct net_device
*dev
,
7538 struct list_head
**iter
)
7540 struct netdev_adjacent
*lower
;
7542 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
7543 if (&lower
->list
== &dev
->adj_list
.lower
)
7546 *iter
= &lower
->list
;
7550 EXPORT_SYMBOL(netdev_next_lower_dev_rcu
);
7552 static u8
__netdev_upper_depth(struct net_device
*dev
)
7554 struct net_device
*udev
;
7555 struct list_head
*iter
;
7559 for (iter
= &dev
->adj_list
.upper
,
7560 udev
= __netdev_next_upper_dev(dev
, &iter
, &ignore
);
7562 udev
= __netdev_next_upper_dev(dev
, &iter
, &ignore
)) {
7565 if (max_depth
< udev
->upper_level
)
7566 max_depth
= udev
->upper_level
;
7572 static u8
__netdev_lower_depth(struct net_device
*dev
)
7574 struct net_device
*ldev
;
7575 struct list_head
*iter
;
7579 for (iter
= &dev
->adj_list
.lower
,
7580 ldev
= __netdev_next_lower_dev(dev
, &iter
, &ignore
);
7582 ldev
= __netdev_next_lower_dev(dev
, &iter
, &ignore
)) {
7585 if (max_depth
< ldev
->lower_level
)
7586 max_depth
= ldev
->lower_level
;
7592 static int __netdev_update_upper_level(struct net_device
*dev
,
7593 struct netdev_nested_priv
*__unused
)
7595 dev
->upper_level
= __netdev_upper_depth(dev
) + 1;
7599 #ifdef CONFIG_LOCKDEP
7600 static LIST_HEAD(net_unlink_list
);
7602 static void net_unlink_todo(struct net_device
*dev
)
7604 if (list_empty(&dev
->unlink_list
))
7605 list_add_tail(&dev
->unlink_list
, &net_unlink_list
);
7609 static int __netdev_update_lower_level(struct net_device
*dev
,
7610 struct netdev_nested_priv
*priv
)
7612 dev
->lower_level
= __netdev_lower_depth(dev
) + 1;
7614 #ifdef CONFIG_LOCKDEP
7618 if (priv
->flags
& NESTED_SYNC_IMM
)
7619 dev
->nested_level
= dev
->lower_level
- 1;
7620 if (priv
->flags
& NESTED_SYNC_TODO
)
7621 net_unlink_todo(dev
);
7626 int netdev_walk_all_lower_dev_rcu(struct net_device
*dev
,
7627 int (*fn
)(struct net_device
*dev
,
7628 struct netdev_nested_priv
*priv
),
7629 struct netdev_nested_priv
*priv
)
7631 struct net_device
*ldev
, *next
, *now
, *dev_stack
[MAX_NEST_DEV
+ 1];
7632 struct list_head
*niter
, *iter
, *iter_stack
[MAX_NEST_DEV
+ 1];
7636 iter
= &dev
->adj_list
.lower
;
7640 ret
= fn(now
, priv
);
7647 ldev
= netdev_next_lower_dev_rcu(now
, &iter
);
7652 niter
= &ldev
->adj_list
.lower
;
7653 dev_stack
[cur
] = now
;
7654 iter_stack
[cur
++] = iter
;
7661 next
= dev_stack
[--cur
];
7662 niter
= iter_stack
[cur
];
7671 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu
);
/*
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);

	return lower->private;
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/*
 * netdev_master_upper_dev_get_rcu - Get master upper device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))

EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      struct list_head *dev_list)
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
7735 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
7736 struct net_device
*adj_dev
,
7737 struct list_head
*dev_list
)
7739 return (dev_list
== &dev
->adj_list
.upper
||
7740 dev_list
== &dev
->adj_list
.lower
) &&
7741 net_eq(dev_net(dev
), dev_net(adj_dev
));
7744 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
7745 struct net_device
*adj_dev
,
7746 struct list_head
*dev_list
,
7747 void *private, bool master
)
7749 struct netdev_adjacent
*adj
;
7752 adj
= __netdev_find_adj(adj_dev
, dev_list
);
7756 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7757 dev
->name
, adj_dev
->name
, adj
->ref_nr
);
7762 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
7767 adj
->master
= master
;
7769 adj
->private = private;
7770 adj
->ignore
= false;
7771 netdev_hold(adj_dev
, &adj
->dev_tracker
, GFP_KERNEL
);
7773 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7774 dev
->name
, adj_dev
->name
, adj
->ref_nr
, adj_dev
->name
);
7776 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
7777 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
7782 /* Ensure that master link is always the first item in list. */
7784 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
7785 &(adj_dev
->dev
.kobj
), "master");
7787 goto remove_symlinks
;
7789 list_add_rcu(&adj
->list
, dev_list
);
7791 list_add_tail_rcu(&adj
->list
, dev_list
);
7797 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
7798 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
7800 netdev_put(adj_dev
, &adj
->dev_tracker
);
7806 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
7807 struct net_device
*adj_dev
,
7809 struct list_head
*dev_list
)
7811 struct netdev_adjacent
*adj
;
7813 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7814 dev
->name
, adj_dev
->name
, ref_nr
);
7816 adj
= __netdev_find_adj(adj_dev
, dev_list
);
7819 pr_err("Adjacency does not exist for device %s from %s\n",
7820 dev
->name
, adj_dev
->name
);
7825 if (adj
->ref_nr
> ref_nr
) {
7826 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7827 dev
->name
, adj_dev
->name
, ref_nr
,
7828 adj
->ref_nr
- ref_nr
);
7829 adj
->ref_nr
-= ref_nr
;
7834 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
7836 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
7837 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
7839 list_del_rcu(&adj
->list
);
7840 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7841 adj_dev
->name
, dev
->name
, adj_dev
->name
);
7842 netdev_put(adj_dev
, &adj
->dev_tracker
);
7843 kfree_rcu(adj
, rcu
);
7846 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
7847 struct net_device
*upper_dev
,
7848 struct list_head
*up_list
,
7849 struct list_head
*down_list
,
7850 void *private, bool master
)
7854 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
,
7859 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
,
7862 __netdev_adjacent_dev_remove(dev
, upper_dev
, 1, up_list
);
7869 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
7870 struct net_device
*upper_dev
,
7872 struct list_head
*up_list
,
7873 struct list_head
*down_list
)
7875 __netdev_adjacent_dev_remove(dev
, upper_dev
, ref_nr
, up_list
);
7876 __netdev_adjacent_dev_remove(upper_dev
, dev
, ref_nr
, down_list
);
7879 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
7880 struct net_device
*upper_dev
,
7881 void *private, bool master
)
7883 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
7884 &dev
->adj_list
.upper
,
7885 &upper_dev
->adj_list
.lower
,
7889 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
7890 struct net_device
*upper_dev
)
7892 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
, 1,
7893 &dev
->adj_list
.upper
,
7894 &upper_dev
->adj_list
.lower
);
static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netdev_nested_priv *priv,
				   struct netlink_ext_ack *extack)
	struct netdev_notifier_changeupper_info changeupper_info = {
		.upper_dev = upper_dev,
		.upper_info = upper_info,
	struct net_device *master_dev;

	if (dev == upper_dev)

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_has_upper_dev(upper_dev, dev))

	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)

	if (__netdev_has_upper_dev(dev, upper_dev))

	master_dev = __netdev_master_upper_dev_get(dev);
		return master_dev == upper_dev ? -EEXIST : -EBUSY;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, priv);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

/*
 * netdev_upper_dev_link - Add a link to the upper device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,

	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, &priv, extack);
EXPORT_SYMBOL(netdev_upper_dev_link);

/*
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,

	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, &priv, extack);
EXPORT_SYMBOL(netdev_master_upper_dev_link);
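/*
 * Illustrative sketch, not part of this file: how an aggregating driver
 * (bond/team-like) would typically pair netdev_master_upper_dev_link() with
 * netdev_upper_dev_unlink() under RTNL. The foo_* names and the private
 * struct are assumptions for the example.
 */
#if 0
static int foo_enslave(struct net_device *master, struct net_device *slave,
		       struct netlink_ext_ack *extack)
{
	struct foo_slave_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
	int err;

	if (!info)
		return -ENOMEM;

	ASSERT_RTNL();
	/* Make @master the single master upper device of @slave. */
	err = netdev_master_upper_dev_link(slave, master, info, NULL, extack);
	if (err)
		kfree(info);
	return err;
}

static void foo_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave, master);
}
#endif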
static void __netdev_upper_dev_unlink(struct net_device *dev,
				      struct net_device *upper_dev,
				      struct netdev_nested_priv *priv)
	struct netdev_notifier_changeupper_info changeupper_info = {
		.upper_dev = upper_dev,

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, priv);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,

/*
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @upper_dev: new upper device
 *
 * Removes a link to device which is upper to this one. The caller must hold
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_TODO,

	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
EXPORT_SYMBOL(netdev_upper_dev_unlink);
8075 static void __netdev_adjacent_dev_set(struct net_device
*upper_dev
,
8076 struct net_device
*lower_dev
,
8079 struct netdev_adjacent
*adj
;
8081 adj
= __netdev_find_adj(lower_dev
, &upper_dev
->adj_list
.lower
);
8085 adj
= __netdev_find_adj(upper_dev
, &lower_dev
->adj_list
.upper
);
8090 static void netdev_adjacent_dev_disable(struct net_device
*upper_dev
,
8091 struct net_device
*lower_dev
)
8093 __netdev_adjacent_dev_set(upper_dev
, lower_dev
, true);
8096 static void netdev_adjacent_dev_enable(struct net_device
*upper_dev
,
8097 struct net_device
*lower_dev
)
8099 __netdev_adjacent_dev_set(upper_dev
, lower_dev
, false);
8102 int netdev_adjacent_change_prepare(struct net_device
*old_dev
,
8103 struct net_device
*new_dev
,
8104 struct net_device
*dev
,
8105 struct netlink_ext_ack
*extack
)
8107 struct netdev_nested_priv priv
= {
8116 if (old_dev
&& new_dev
!= old_dev
)
8117 netdev_adjacent_dev_disable(dev
, old_dev
);
8118 err
= __netdev_upper_dev_link(new_dev
, dev
, false, NULL
, NULL
, &priv
,
8121 if (old_dev
&& new_dev
!= old_dev
)
8122 netdev_adjacent_dev_enable(dev
, old_dev
);
8128 EXPORT_SYMBOL(netdev_adjacent_change_prepare
);
8130 void netdev_adjacent_change_commit(struct net_device
*old_dev
,
8131 struct net_device
*new_dev
,
8132 struct net_device
*dev
)
8134 struct netdev_nested_priv priv
= {
8135 .flags
= NESTED_SYNC_IMM
| NESTED_SYNC_TODO
,
8139 if (!new_dev
|| !old_dev
)
8142 if (new_dev
== old_dev
)
8145 netdev_adjacent_dev_enable(dev
, old_dev
);
8146 __netdev_upper_dev_unlink(old_dev
, dev
, &priv
);
8148 EXPORT_SYMBOL(netdev_adjacent_change_commit
);
8150 void netdev_adjacent_change_abort(struct net_device
*old_dev
,
8151 struct net_device
*new_dev
,
8152 struct net_device
*dev
)
8154 struct netdev_nested_priv priv
= {
8162 if (old_dev
&& new_dev
!= old_dev
)
8163 netdev_adjacent_dev_enable(dev
, old_dev
);
8165 __netdev_upper_dev_unlink(new_dev
, dev
, &priv
);
8167 EXPORT_SYMBOL(netdev_adjacent_change_abort
);
8170 * netdev_bonding_info_change - Dispatch event about slave change
8172 * @bonding_info: info to dispatch
8174 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8175 * The caller must hold the RTNL lock.
8177 void netdev_bonding_info_change(struct net_device
*dev
,
8178 struct netdev_bonding_info
*bonding_info
)
8180 struct netdev_notifier_bonding_info info
= {
8184 memcpy(&info
.bonding_info
, bonding_info
,
8185 sizeof(struct netdev_bonding_info
));
8186 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
,
8189 EXPORT_SYMBOL(netdev_bonding_info_change
);
static int netdev_offload_xstats_enable_l3(struct net_device *dev,
					   struct netlink_ext_ack *extack)
	struct netdev_notifier_offload_xstats_info info = {
		.info.extack = extack,
		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,

	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
	if (!dev->offload_xstats_l3)

	rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
						  NETDEV_OFFLOAD_XSTATS_DISABLE,
	err = notifier_to_errno(rc);

	kfree(dev->offload_xstats_l3);
	dev->offload_xstats_l3 = NULL;

int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack)
	if (netdev_offload_xstats_enabled(dev, type))

	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		return netdev_offload_xstats_enable_l3(dev, extack);

EXPORT_SYMBOL(netdev_offload_xstats_enable);

static void netdev_offload_xstats_disable_l3(struct net_device *dev)
	struct netdev_notifier_offload_xstats_info info = {
		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,

	call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
	kfree(dev->offload_xstats_l3);
	dev->offload_xstats_l3 = NULL;

int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type)
	if (!netdev_offload_xstats_enabled(dev, type))

	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		netdev_offload_xstats_disable_l3(dev);

EXPORT_SYMBOL(netdev_offload_xstats_disable);

static void netdev_offload_xstats_disable_all(struct net_device *dev)
	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);

static struct rtnl_hw_stats64 *
netdev_offload_xstats_get_ptr(const struct net_device *dev,
			      enum netdev_offload_xstats_type type)
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		return dev->offload_xstats_l3;

bool netdev_offload_xstats_enabled(const struct net_device *dev,
				   enum netdev_offload_xstats_type type)
	return netdev_offload_xstats_get_ptr(dev, type);
EXPORT_SYMBOL(netdev_offload_xstats_enabled);

struct netdev_notifier_offload_xstats_ru {

struct netdev_notifier_offload_xstats_rd {
	struct rtnl_hw_stats64 stats;

static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
				  const struct rtnl_hw_stats64 *src)
	dest->rx_packets += src->rx_packets;
	dest->tx_packets += src->tx_packets;
	dest->rx_bytes += src->rx_bytes;
	dest->tx_bytes += src->tx_bytes;
	dest->rx_errors += src->rx_errors;
	dest->tx_errors += src->tx_errors;
	dest->rx_dropped += src->rx_dropped;
	dest->tx_dropped += src->tx_dropped;
	dest->multicast += src->multicast;

static int netdev_offload_xstats_get_used(struct net_device *dev,
					  enum netdev_offload_xstats_type type,
					  struct netlink_ext_ack *extack)
	struct netdev_notifier_offload_xstats_ru report_used = {};
	struct netdev_notifier_offload_xstats_info info = {
		.info.extack = extack,
		.report_used = &report_used,

	WARN_ON(!netdev_offload_xstats_enabled(dev, type));
	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
	*p_used = report_used.used;
	return notifier_to_errno(rc);

static int netdev_offload_xstats_get_stats(struct net_device *dev,
					   enum netdev_offload_xstats_type type,
					   struct rtnl_hw_stats64 *p_stats,
					   struct netlink_ext_ack *extack)
	struct netdev_notifier_offload_xstats_rd report_delta = {};
	struct netdev_notifier_offload_xstats_info info = {
		.info.extack = extack,
		.report_delta = &report_delta,
	struct rtnl_hw_stats64 *stats;

	stats = netdev_offload_xstats_get_ptr(dev, type);
	if (WARN_ON(!stats))

	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	netdev_hw_stats64_add(stats, &report_delta.stats);

	*p_used = report_delta.used;

	return notifier_to_errno(rc);

int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
			      struct netlink_ext_ack *extack)
		return netdev_offload_xstats_get_stats(dev, type, p_stats,
	return netdev_offload_xstats_get_used(dev, type, p_used,
EXPORT_SYMBOL(netdev_offload_xstats_get);
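/*
 * Illustrative sketch, not part of this file: the intended calling sequence
 * for the HW-offload xstats API above, as a hypothetical RTNL-holding caller
 * might use it (enable once, then periodically collect the accumulated
 * counters). The foo_* wrapper is an assumption; error handling is trimmed.
 */
#if 0
static int foo_collect_l3_stats(struct net_device *dev,
				struct netlink_ext_ack *extack)
{
	struct rtnl_hw_stats64 stats = {};
	bool used;
	int err;

	if (!netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		err = netdev_offload_xstats_enable(dev,
						   NETDEV_OFFLOAD_XSTATS_TYPE_L3,
						   extack);
		if (err)
			return err;
	}

	return netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					 &stats, &used, extack);
}
#endif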
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
				   const struct rtnl_hw_stats64 *stats)
	report_delta->used = true;
	netdev_hw_stats64_add(&report_delta->stats, stats);
EXPORT_SYMBOL(netdev_offload_xstats_report_delta);

netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
	report_used->used = true;
EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8411 void netdev_offload_xstats_push_delta(struct net_device
*dev
,
8412 enum netdev_offload_xstats_type type
,
8413 const struct rtnl_hw_stats64
*p_stats
)
8415 struct rtnl_hw_stats64
*stats
;
8419 stats
= netdev_offload_xstats_get_ptr(dev
, type
);
8420 if (WARN_ON(!stats
))
8423 netdev_hw_stats64_add(stats
, p_stats
);
8425 EXPORT_SYMBOL(netdev_offload_xstats_push_delta
);
8428 * netdev_get_xmit_slave - Get the xmit slave of master device
8431 * @all_slaves: assume all the slaves are active
8433 * The reference counters are not incremented so the caller must be
8434 * careful with locks. The caller must hold RCU lock.
8435 * %NULL is returned if no slave is found.
8438 struct net_device
*netdev_get_xmit_slave(struct net_device
*dev
,
8439 struct sk_buff
*skb
,
8442 const struct net_device_ops
*ops
= dev
->netdev_ops
;
8444 if (!ops
->ndo_get_xmit_slave
)
8446 return ops
->ndo_get_xmit_slave(dev
, skb
, all_slaves
);
8448 EXPORT_SYMBOL(netdev_get_xmit_slave
);
8450 static struct net_device
*netdev_sk_get_lower_dev(struct net_device
*dev
,
8453 const struct net_device_ops
*ops
= dev
->netdev_ops
;
8455 if (!ops
->ndo_sk_get_lower_dev
)
8457 return ops
->ndo_sk_get_lower_dev(dev
, sk
);
8461 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8465 * %NULL is returned if no lower device is found.
8468 struct net_device
*netdev_sk_get_lowest_dev(struct net_device
*dev
,
8471 struct net_device
*lower
;
8473 lower
= netdev_sk_get_lower_dev(dev
, sk
);
8476 lower
= netdev_sk_get_lower_dev(dev
, sk
);
8481 EXPORT_SYMBOL(netdev_sk_get_lowest_dev
);
8483 static void netdev_adjacent_add_links(struct net_device
*dev
)
8485 struct netdev_adjacent
*iter
;
8487 struct net
*net
= dev_net(dev
);
8489 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8490 if (!net_eq(net
, dev_net(iter
->dev
)))
8492 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8493 &iter
->dev
->adj_list
.lower
);
8494 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
8495 &dev
->adj_list
.upper
);
8498 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8499 if (!net_eq(net
, dev_net(iter
->dev
)))
8501 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8502 &iter
->dev
->adj_list
.upper
);
8503 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
8504 &dev
->adj_list
.lower
);
8508 static void netdev_adjacent_del_links(struct net_device
*dev
)
8510 struct netdev_adjacent
*iter
;
8512 struct net
*net
= dev_net(dev
);
8514 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8515 if (!net_eq(net
, dev_net(iter
->dev
)))
8517 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
8518 &iter
->dev
->adj_list
.lower
);
8519 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
8520 &dev
->adj_list
.upper
);
8523 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8524 if (!net_eq(net
, dev_net(iter
->dev
)))
8526 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
8527 &iter
->dev
->adj_list
.upper
);
8528 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
8529 &dev
->adj_list
.lower
);
8533 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
8535 struct netdev_adjacent
*iter
;
8537 struct net
*net
= dev_net(dev
);
8539 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
8540 if (!net_eq(net
, dev_net(iter
->dev
)))
8542 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
8543 &iter
->dev
->adj_list
.lower
);
8544 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8545 &iter
->dev
->adj_list
.lower
);
8548 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
8549 if (!net_eq(net
, dev_net(iter
->dev
)))
8551 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
8552 &iter
->dev
->adj_list
.upper
);
8553 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
8554 &iter
->dev
->adj_list
.upper
);
8558 void *netdev_lower_dev_get_private(struct net_device
*dev
,
8559 struct net_device
*lower_dev
)
8561 struct netdev_adjacent
*lower
;
8565 lower
= __netdev_find_adj(lower_dev
, &dev
->adj_list
.lower
);
8569 return lower
->private;
8571 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
8575 * netdev_lower_state_changed - Dispatch event about lower device state change
8576 * @lower_dev: device
8577 * @lower_state_info: state to dispatch
8579 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8580 * The caller must hold the RTNL lock.
8582 void netdev_lower_state_changed(struct net_device
*lower_dev
,
8583 void *lower_state_info
)
8585 struct netdev_notifier_changelowerstate_info changelowerstate_info
= {
8586 .info
.dev
= lower_dev
,
8590 changelowerstate_info
.lower_state_info
= lower_state_info
;
8591 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE
,
8592 &changelowerstate_info
.info
);
8594 EXPORT_SYMBOL(netdev_lower_state_changed
);
static void dev_change_rx_flags(struct net_device *dev, int flags)
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
	unsigned int old_flags = dev->flags;
	unsigned int promiscuity, flags;

	promiscuity = dev->promiscuity + inc;
	if (promiscuity == 0) {
		/* If inc causes overflow, untouch promisc and return error. */
		if (unlikely(inc > 0)) {
			netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
		flags = old_flags & ~IFF_PROMISC;
		flags = old_flags | IFF_PROMISC;
	WRITE_ONCE(dev->promiscuity, promiscuity);
	if (flags != old_flags) {
		WRITE_ONCE(dev->flags, flags);
		netdev_info(dev, "%s promiscuous mode\n",
			    dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));

		dev_change_rx_flags(dev, IFF_PROMISC);

		__dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);

/*
 * dev_set_promiscuity - update promiscuity count on a device
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
	unsigned int old_flags = dev->flags;

	err = __dev_set_promiscuity(dev, inc, true);

	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
EXPORT_SYMBOL(dev_set_promiscuity);
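/*
 * Illustrative sketch, not part of this file: dev_set_promiscuity() keeps a
 * counted reference, so a packet-tap-like user takes one reference while it
 * is attached and drops it symmetrically. foo_* names are assumptions.
 */
#if 0
static int foo_tap_attach(struct net_device *dev)
{
	/* +1: keep the device in promiscuous mode while the tap exists. */
	return dev_set_promiscuity(dev, 1);
}

static void foo_tap_detach(struct net_device *dev)
{
	/* -1: device leaves promiscuous mode once the count drops to zero. */
	dev_set_promiscuity(dev, -1);
}
#endif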
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
	unsigned int allmulti, flags;

	allmulti = dev->allmulti + inc;
	if (allmulti == 0) {
		/* If inc causes overflow, untouch allmulti and return error. */
		if (unlikely(inc > 0)) {
			netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
		flags = old_flags & ~IFF_ALLMULTI;
		flags = old_flags | IFF_ALLMULTI;
	WRITE_ONCE(dev->allmulti, allmulti);
	if (flags != old_flags) {
		WRITE_ONCE(dev->flags, flags);
		netdev_info(dev, "%s allmulticast mode\n",
			    dev->flags & IFF_ALLMULTI ? "entered" : "left");
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		__dev_notify_flags(dev, old_flags,
				   dev->gflags ^ old_gflags, 0, NULL);

/*
 * dev_set_allmulti - update allmulti count on a device
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all interfaces. Once it hits zero the device reverts back to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
	return __dev_set_allmulti(dev, inc, true);
EXPORT_SYMBOL(dev_set_allmulti);
/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 */
void __dev_set_rx_mode(struct net_device *dev)
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))

	if (!netif_device_present(dev))

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);

void dev_set_rx_mode(struct net_device *dev)
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);

/*
 * dev_get_flags - get flags reported to userspace
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
	flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
		(READ_ONCE(dev->gflags) & (IFF_PROMISC |

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;

EXPORT_SYMBOL(dev_get_flags);
8803 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
,
8804 struct netlink_ext_ack
*extack
)
8806 unsigned int old_flags
= dev
->flags
;
8812 * Set the flags on our device.
8815 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
8816 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
8818 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
8822 * Load in the correct multicast list now the flags have changed.
8825 if ((old_flags
^ flags
) & IFF_MULTICAST
)
8826 dev_change_rx_flags(dev
, IFF_MULTICAST
);
8828 dev_set_rx_mode(dev
);
8831 * Have we downed the interface. We handle IFF_UP ourselves
8832 * according to user attempts to set it, rather than blindly
8837 if ((old_flags
^ flags
) & IFF_UP
) {
8838 if (old_flags
& IFF_UP
)
8841 ret
= __dev_open(dev
, extack
);
8844 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
8845 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
8846 unsigned int old_flags
= dev
->flags
;
8848 dev
->gflags
^= IFF_PROMISC
;
8850 if (__dev_set_promiscuity(dev
, inc
, false) >= 0)
8851 if (dev
->flags
!= old_flags
)
8852 dev_set_rx_mode(dev
);
8855 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8856 * is important. Some (broken) drivers set IFF_PROMISC, when
8857 * IFF_ALLMULTI is requested not asking us and not reporting.
8859 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
8860 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
8862 dev
->gflags
^= IFF_ALLMULTI
;
8863 __dev_set_allmulti(dev
, inc
, false);
8869 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
,
8870 unsigned int gchanges
, u32 portid
,
8871 const struct nlmsghdr
*nlh
)
8873 unsigned int changes
= dev
->flags
^ old_flags
;
8876 rtmsg_ifinfo(RTM_NEWLINK
, dev
, gchanges
, GFP_ATOMIC
, portid
, nlh
);
8878 if (changes
& IFF_UP
) {
8879 if (dev
->flags
& IFF_UP
)
8880 call_netdevice_notifiers(NETDEV_UP
, dev
);
8882 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
8885 if (dev
->flags
& IFF_UP
&&
8886 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
))) {
8887 struct netdev_notifier_change_info change_info
= {
8891 .flags_changed
= changes
,
8894 call_netdevice_notifiers_info(NETDEV_CHANGE
, &change_info
.info
);
8899 * dev_change_flags - change device settings
8901 * @flags: device state flags
8902 * @extack: netlink extended ack
8904 * Change settings on device based state flags. The flags are
8905 * in the userspace exported format.
8907 int dev_change_flags(struct net_device
*dev
, unsigned int flags
,
8908 struct netlink_ext_ack
*extack
)
8911 unsigned int changes
, old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
8913 ret
= __dev_change_flags(dev
, flags
, extack
);
8917 changes
= (old_flags
^ dev
->flags
) | (old_gflags
^ dev
->gflags
);
8918 __dev_notify_flags(dev
, old_flags
, changes
, 0, NULL
);
8921 EXPORT_SYMBOL(dev_change_flags
);
int __dev_set_mtu(struct net_device *dev, int new_mtu)
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	/* Pairs with all the lockless reads of dev->mtu in the stack */
	WRITE_ONCE(dev->mtu, new_mtu);
EXPORT_SYMBOL(__dev_set_mtu);

int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");

/*
 * dev_set_mtu_ext - Change maximum transfer unit
 * @new_mtu: new transfer unit
 * @extack: netlink extended ack
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
	if (new_mtu == dev->mtu)

	err = dev_validate_mtu(dev, new_mtu, extack);

	if (!netif_device_present(dev))

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
	err = notifier_to_errno(err);
		/* setting mtu back and notifying everyone again,
		 * so that they have a chance to revert changes.
		 */
		__dev_set_mtu(dev, orig_mtu);
		call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,

int dev_set_mtu(struct net_device *dev, int new_mtu)
	struct netlink_ext_ack extack;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
EXPORT_SYMBOL(dev_set_mtu);
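/*
 * Illustrative sketch, not part of this file: dev_set_mtu() above already
 * validates against dev->min_mtu/dev->max_mtu and runs the MTU notifiers,
 * so a caller only needs to hold RTNL and check the return value. The
 * foo_* wrapper is an assumption.
 */
#if 0
static int foo_grow_mtu_for_encap(struct net_device *dev, int overhead)
{
	ASSERT_RTNL();

	/* Ask for headroom for an encapsulation header on top of the
	 * current MTU; dev_set_mtu() rejects values the device can't do.
	 */
	return dev_set_mtu(dev, READ_ONCE(dev->mtu) + overhead);
}
#endif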
9013 * dev_change_tx_queue_len - Change TX queue length of a netdevice
9015 * @new_len: new tx queue length
9017 int dev_change_tx_queue_len(struct net_device
*dev
, unsigned long new_len
)
9019 unsigned int orig_len
= dev
->tx_queue_len
;
9022 if (new_len
!= (unsigned int)new_len
)
9025 if (new_len
!= orig_len
) {
9026 WRITE_ONCE(dev
->tx_queue_len
, new_len
);
9027 res
= call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN
, dev
);
9028 res
= notifier_to_errno(res
);
9031 res
= dev_qdisc_change_tx_queue_len(dev
);
9039 netdev_err(dev
, "refused to change device tx_queue_len\n");
9040 WRITE_ONCE(dev
->tx_queue_len
, orig_len
);
9045 * dev_set_group - Change group this device belongs to
9047 * @new_group: group this device should belong to
9049 void dev_set_group(struct net_device
*dev
, int new_group
)
9051 dev
->group
= new_group
;
9055 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
9057 * @addr: new address
9058 * @extack: netlink extended ack
9060 int dev_pre_changeaddr_notify(struct net_device
*dev
, const char *addr
,
9061 struct netlink_ext_ack
*extack
)
9063 struct netdev_notifier_pre_changeaddr_info info
= {
9065 .info
.extack
= extack
,
9070 rc
= call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR
, &info
.info
);
9071 return notifier_to_errno(rc
);
9073 EXPORT_SYMBOL(dev_pre_changeaddr_notify
);
9076 * dev_set_mac_address - Change Media Access Control Address
9079 * @extack: netlink extended ack
9081 * Change the hardware (MAC) address of the device
9083 int dev_set_mac_address(struct net_device
*dev
, struct sockaddr
*sa
,
9084 struct netlink_ext_ack
*extack
)
9086 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9089 if (!ops
->ndo_set_mac_address
)
9091 if (sa
->sa_family
!= dev
->type
)
9093 if (!netif_device_present(dev
))
9095 err
= dev_pre_changeaddr_notify(dev
, sa
->sa_data
, extack
);
9098 if (memcmp(dev
->dev_addr
, sa
->sa_data
, dev
->addr_len
)) {
9099 err
= ops
->ndo_set_mac_address(dev
, sa
);
9103 dev
->addr_assign_type
= NET_ADDR_SET
;
9104 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
9105 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
9108 EXPORT_SYMBOL(dev_set_mac_address
);
9110 DECLARE_RWSEM(dev_addr_sem
);
9112 int dev_set_mac_address_user(struct net_device
*dev
, struct sockaddr
*sa
,
9113 struct netlink_ext_ack
*extack
)
9117 down_write(&dev_addr_sem
);
9118 ret
= dev_set_mac_address(dev
, sa
, extack
);
9119 up_write(&dev_addr_sem
);
9122 EXPORT_SYMBOL(dev_set_mac_address_user
);
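/*
 * Illustrative sketch, not part of this file: building a struct sockaddr and
 * changing the hardware address through dev_set_mac_address_user(), which
 * serializes against concurrent readers via dev_addr_sem as shown above.
 * The foo_* helper and the random address choice are assumptions.
 */
#if 0
static int foo_set_random_mac(struct net_device *dev,
			      struct netlink_ext_ack *extack)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	eth_random_addr((u8 *)sa.sa_data);

	ASSERT_RTNL();
	return dev_set_mac_address_user(dev, &sa, extack);
}
#endif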
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
	size_t size = sizeof(sa->sa_data_min);
	struct net_device *dev;

	down_read(&dev_addr_sem);

	dev = dev_get_by_name_rcu(net, dev_name);

	memset(sa->sa_data, 0, size);
	memcpy(sa->sa_data, dev->dev_addr,
	       min_t(size_t, size, dev->addr_len));
	sa->sa_family = dev->type;

	up_read(&dev_addr_sem);
EXPORT_SYMBOL(dev_get_mac_address);
9153 * dev_change_carrier - Change device carrier
9155 * @new_carrier: new value
9157 * Change device carrier
9159 int dev_change_carrier(struct net_device
*dev
, bool new_carrier
)
9161 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9163 if (!ops
->ndo_change_carrier
)
9165 if (!netif_device_present(dev
))
9167 return ops
->ndo_change_carrier(dev
, new_carrier
);
9171 * dev_get_phys_port_id - Get device physical port ID
9175 * Get device physical port ID
9177 int dev_get_phys_port_id(struct net_device
*dev
,
9178 struct netdev_phys_item_id
*ppid
)
9180 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9182 if (!ops
->ndo_get_phys_port_id
)
9184 return ops
->ndo_get_phys_port_id(dev
, ppid
);
9188 * dev_get_phys_port_name - Get device physical port name
9191 * @len: limit of bytes to copy to name
9193 * Get device physical port name
9195 int dev_get_phys_port_name(struct net_device
*dev
,
9196 char *name
, size_t len
)
9198 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9201 if (ops
->ndo_get_phys_port_name
) {
9202 err
= ops
->ndo_get_phys_port_name(dev
, name
, len
);
9203 if (err
!= -EOPNOTSUPP
)
9206 return devlink_compat_phys_port_name_get(dev
, name
, len
);
9210 * dev_get_port_parent_id - Get the device's port parent identifier
9211 * @dev: network device
9212 * @ppid: pointer to a storage for the port's parent identifier
9213 * @recurse: allow/disallow recursion to lower devices
9215 * Get the devices's port parent identifier
9217 int dev_get_port_parent_id(struct net_device
*dev
,
9218 struct netdev_phys_item_id
*ppid
,
9221 const struct net_device_ops
*ops
= dev
->netdev_ops
;
9222 struct netdev_phys_item_id first
= { };
9223 struct net_device
*lower_dev
;
9224 struct list_head
*iter
;
9227 if (ops
->ndo_get_port_parent_id
) {
9228 err
= ops
->ndo_get_port_parent_id(dev
, ppid
);
9229 if (err
!= -EOPNOTSUPP
)
9233 err
= devlink_compat_switch_id_get(dev
, ppid
);
9234 if (!recurse
|| err
!= -EOPNOTSUPP
)
9237 netdev_for_each_lower_dev(dev
, lower_dev
, iter
) {
9238 err
= dev_get_port_parent_id(lower_dev
, ppid
, true);
9243 else if (memcmp(&first
, ppid
, sizeof(*ppid
)))
9249 EXPORT_SYMBOL(dev_get_port_parent_id
);
9252 * netdev_port_same_parent_id - Indicate if two network devices have
9253 * the same port parent identifier
9254 * @a: first network device
9255 * @b: second network device
9257 bool netdev_port_same_parent_id(struct net_device
*a
, struct net_device
*b
)
9259 struct netdev_phys_item_id a_id
= { };
9260 struct netdev_phys_item_id b_id
= { };
9262 if (dev_get_port_parent_id(a
, &a_id
, true) ||
9263 dev_get_port_parent_id(b
, &b_id
, true))
9266 return netdev_phys_item_id_same(&a_id
, &b_id
);
9268 EXPORT_SYMBOL(netdev_port_same_parent_id
);
9271 * dev_change_proto_down - set carrier according to proto_down.
9274 * @proto_down: new value
9276 int dev_change_proto_down(struct net_device
*dev
, bool proto_down
)
9278 if (!dev
->change_proto_down
)
9280 if (!netif_device_present(dev
))
9283 netif_carrier_off(dev
);
9285 netif_carrier_on(dev
);
9286 WRITE_ONCE(dev
->proto_down
, proto_down
);
9291 * dev_change_proto_down_reason - proto down reason
9294 * @mask: proto down mask
9295 * @value: proto down value
9297 void dev_change_proto_down_reason(struct net_device
*dev
, unsigned long mask
,
9300 u32 proto_down_reason
;
9304 proto_down_reason
= value
;
9306 proto_down_reason
= dev
->proto_down_reason
;
9307 for_each_set_bit(b
, &mask
, 32) {
9308 if (value
& (1 << b
))
9309 proto_down_reason
|= BIT(b
);
9311 proto_down_reason
&= ~BIT(b
);
9314 WRITE_ONCE(dev
->proto_down_reason
, proto_down_reason
);
struct bpf_xdp_link {
	struct bpf_link link;
	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */

static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
	if (flags & XDP_FLAGS_HW_MODE)
	if (flags & XDP_FLAGS_DRV_MODE)
		return XDP_MODE_DRV;
	if (flags & XDP_FLAGS_SKB_MODE)
		return XDP_MODE_SKB;
	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;

static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
		return generic_xdp_install;
		return dev->netdev_ops->ndo_bpf;

static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
					 enum bpf_xdp_mode mode)
	return dev->xdp_state[mode].link;

static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
				     enum bpf_xdp_mode mode)
	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);

		return link->link.prog;
	return dev->xdp_state[mode].prog;

u8 dev_xdp_prog_count(struct net_device *dev)
	for (i = 0; i < __MAX_XDP_MODE; i++)
		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
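/*
 * Illustrative sketch, not part of this file: how the mode helpers above map
 * XDP_FLAGS_* onto attachment modes, and how dev_xdp_prog_count() can be
 * used to refuse stacking when any XDP program is already attached. The
 * foo_* function is an assumption.
 */
#if 0
static int foo_check_xdp_free(struct net_device *dev, u32 flags,
			      struct netlink_ext_ack *extack)
{
	/* XDP_FLAGS_SKB_MODE -> generic XDP, XDP_FLAGS_DRV_MODE -> native,
	 * XDP_FLAGS_HW_MODE -> offload; no flag defaults to native when the
	 * driver implements ndo_bpf, otherwise to generic.
	 */
	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);

	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "device already has an XDP program");
		return -EBUSY;
	}

	return mode == XDP_MODE_HW ? -EOPNOTSUPP : 0;
}
#endif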
int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
	if (!dev->netdev_ops->ndo_bpf)

	if (dev_get_min_mp_channel_count(dev)) {
		NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");

	return dev->netdev_ops->ndo_bpf(dev, bpf);
EXPORT_SYMBOL_GPL(dev_xdp_propagate);
9389 u32
dev_xdp_prog_id(struct net_device
*dev
, enum bpf_xdp_mode mode
)
9391 struct bpf_prog
*prog
= dev_xdp_prog(dev
, mode
);
9393 return prog
? prog
->aux
->id
: 0;
static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_xdp_link *link)
{
	dev->xdp_state[mode].link = link;
	dev->xdp_state[mode].prog = NULL;
}

static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_prog *prog)
{
	dev->xdp_state[mode].link = NULL;
	dev->xdp_state[mode].prog = prog;
}
static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
			   u32 flags, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	int err;

	if (dev_get_min_mp_channel_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
		return -EBUSY;
	}

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	/* Drivers assume refcnt is already incremented (i.e, prog pointer is
	 * "moved" into driver), so they don't increment it on their own, but
	 * they do decrement refcnt when program is detached or replaced.
	 * Given net_device also owns link/prog, we need to bump refcnt here
	 * to prevent drivers from underflowing it.
	 */
	if (prog)
		bpf_prog_inc(prog);
	err = bpf_op(dev, &xdp);
	if (err) {
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	if (mode != XDP_MODE_HW)
		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);

	return 0;
}
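/* Hedged sketch of the driver side consuming the request built above; the
 * foo_* names are hypothetical and only the netdev_bpf layout (command, prog,
 * extack) comes from this file.
 *
 *	static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_setup_xdp(netdev_priv(dev), bpf->prog);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */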
static void dev_xdp_uninstall(struct net_device *dev)
{
	struct bpf_xdp_link *link;
	struct bpf_prog *prog;
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;

	ASSERT_RTNL();

	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
		prog = dev_xdp_prog(dev, mode);
		if (!prog)
			continue;

		bpf_op = dev_xdp_bpf_op(dev, mode);
		if (!bpf_op)
			continue;

		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));

		/* auto-detach link from net device */
		link = dev_xdp_link(dev, mode);
		if (link)
			link->dev = NULL;
		else
			bpf_prog_put(prog);

		dev_xdp_set_link(dev, mode, NULL);
	}
}
static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
			  struct bpf_prog *old_prog, u32 flags)
{
	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
	struct bpf_prog *cur_prog;
	struct net_device *upper;
	struct list_head *iter;
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;
	int err;

	ASSERT_RTNL();

	/* either link or prog attachment, never both */
	if (link && (new_prog || old_prog))
		return -EINVAL;
	/* link supports only XDP mode flags */
	if (link && (flags & ~XDP_FLAGS_MODES)) {
		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
		return -EINVAL;
	}
	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
	if (num_modes > 1) {
		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
		return -EINVAL;
	}
	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
		NL_SET_ERR_MSG(extack,
			       "More than one program loaded, unset mode is ambiguous");
		return -EINVAL;
	}
	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
		return -EINVAL;
	}

	mode = dev_xdp_mode(dev, flags);
	/* can't replace attached link */
	if (dev_xdp_link(dev, mode)) {
		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
		return -EBUSY;
	}

	/* don't allow if an upper device already has a program */
	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		if (dev_xdp_prog_count(upper) > 0) {
			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
			return -EEXIST;
		}
	}

	cur_prog = dev_xdp_prog(dev, mode);
	/* can't replace attached prog with link */
	if (link && cur_prog) {
		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
		return -EBUSY;
	}
	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
		NL_SET_ERR_MSG(extack, "Active program does not match expected");
		return -EEXIST;
	}

	/* put effective new program into new_prog */
	if (link)
		new_prog = link->link.prog;

	if (new_prog) {
		bool offload = mode == XDP_MODE_HW;
		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
					       ? XDP_MODE_DRV : XDP_MODE_SKB;

		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
			NL_SET_ERR_MSG(extack, "XDP program already attached");
			return -EBUSY;
		}
		if (!offload && dev_xdp_prog(dev, other_mode)) {
			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
			return -EEXIST;
		}
		if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
			NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
			return -EINVAL;
		}
		if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
			NL_SET_ERR_MSG(extack, "Program bound to different device");
			return -EINVAL;
		}
		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
			return -EINVAL;
		}
		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
			return -EINVAL;
		}
	}

	/* don't call drivers if the effective program didn't change */
	if (new_prog != cur_prog) {
		bpf_op = dev_xdp_bpf_op(dev, mode);
		if (!bpf_op) {
			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
			return -EOPNOTSUPP;
		}

		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
		if (err)
			return err;
	}

	if (link)
		dev_xdp_set_link(dev, mode, link);
	else
		dev_xdp_set_prog(dev, mode, new_prog);
	if (cur_prog)
		bpf_prog_put(cur_prog);

	return 0;
}
static int dev_xdp_attach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{
	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
}
static int dev_xdp_detach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;

	ASSERT_RTNL();

	mode = dev_xdp_mode(dev, link->flags);
	if (dev_xdp_link(dev, mode) != link)
		return -EINVAL;

	bpf_op = dev_xdp_bpf_op(dev, mode);
	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
	dev_xdp_set_link(dev, mode, NULL);
	return 0;
}
static void bpf_xdp_link_release(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	rtnl_lock();

	/* if racing with net_device's tear down, xdp_link->dev might be
	 * already NULL, in which case link was already auto-detached
	 */
	if (xdp_link->dev) {
		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
		xdp_link->dev = NULL;
	}

	rtnl_unlock();
}
static int bpf_xdp_link_detach(struct bpf_link *link)
{
	bpf_xdp_link_release(link);
	return 0;
}
static void bpf_xdp_link_dealloc(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	kfree(xdp_link);
}
static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
				     struct seq_file *seq)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
}
static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
				       struct bpf_link_info *info)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	info->xdp.ifindex = ifindex;
	return 0;
}
static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
			       struct bpf_prog *old_prog)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;
	int err = 0;

	rtnl_lock();

	/* link might have been auto-released already, so fail */
	if (!xdp_link->dev) {
		err = -ENOLINK;
		goto out_unlock;
	}

	if (old_prog && link->prog != old_prog) {
		err = -EPERM;
		goto out_unlock;
	}
	old_prog = link->prog;
	if (old_prog->type != new_prog->type ||
	    old_prog->expected_attach_type != new_prog->expected_attach_type) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (old_prog == new_prog) {
		/* no-op, don't disturb drivers */
		bpf_prog_put(new_prog);
		goto out_unlock;
	}

	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
			      xdp_link->flags, new_prog);
	if (err)
		goto out_unlock;

	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	rtnl_unlock();
	return err;
}
static const struct bpf_link_ops bpf_xdp_link_lops = {
	.release = bpf_xdp_link_release,
	.dealloc = bpf_xdp_link_dealloc,
	.detach = bpf_xdp_link_detach,
	.show_fdinfo = bpf_xdp_link_show_fdinfo,
	.fill_link_info = bpf_xdp_link_fill_link_info,
	.update_prog = bpf_xdp_link_update,
};
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_link_primer link_primer;
	struct netlink_ext_ack extack = {};
	struct bpf_xdp_link *link;
	struct net_device *dev;
	int err, fd;

	rtnl_lock();
	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
	if (!dev) {
		rtnl_unlock();
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto unlock;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
	link->dev = dev;
	link->flags = attr->link_create.flags;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto unlock;
	}

	err = dev_xdp_attach_link(dev, &extack, link);
	rtnl_unlock();

	if (err) {
		link->dev = NULL;
		bpf_link_cleanup(&link_primer);
		trace_bpf_xdp_link_attach_failed(extack._msg);
		goto out_put_dev;
	}

	fd = bpf_link_settle(&link_primer);
	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
	dev_put(dev);
	return fd;

unlock:
	rtnl_unlock();

out_put_dev:
	dev_put(dev);
	return err;
}
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@expected_fd: old program fd that userspace expects to replace or clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags)
{
	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
	int err;

	ASSERT_RTNL();

	if (fd >= 0) {
		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(new_prog))
			return PTR_ERR(new_prog);
	}

	if (expected_fd >= 0) {
		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(old_prog)) {
			err = PTR_ERR(old_prog);
			old_prog = NULL;
			goto err_out;
		}
	}

	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);

err_out:
	if (err && new_prog)
		bpf_prog_put(new_prog);
	if (old_prog)
		bpf_prog_put(old_prog);
	return err;
}
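/* Illustrative only: the expected_fd/XDP_FLAGS_REPLACE contract enforced by
 * dev_xdp_attach() above, as a hypothetical in-kernel caller would use it.
 * The fd values are examples, not taken from this file.
 *
 *	// atomically replace prog A with prog B, failing if A is not attached:
 *	dev_change_xdp_fd(dev, extack, fd_b, fd_a, XDP_FLAGS_REPLACE);
 *	// detach whatever program is currently attached:
 *	dev_change_xdp_fd(dev, extack, -1, -1, 0);
 */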
u32 dev_get_min_mp_channel_count(const struct net_device *dev)
{
	int i;

	ASSERT_RTNL();

	for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
		if (dev->_rx[i].mp_params.mp_priv)
			/* The channel count is the idx plus 1. */
			return i + 1;

	return 0;
}
/**
 * dev_index_reserve() - allocate an ifindex in a namespace
 * @net: the applicable net namespace
 * @ifindex: requested ifindex, pass %0 to get one allocated
 *
 * Allocate a ifindex for a new device. Caller must either use the ifindex
 * to store the device (via list_netdevice()) or call dev_index_release()
 * to give the index up.
 *
 * Return: a suitable unique value for a new device interface number or -errno.
 */
static int dev_index_reserve(struct net *net, u32 ifindex)
{
	int err;

	if (ifindex > INT_MAX) {
		DEBUG_NET_WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (!ifindex)
		err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
				      xa_limit_31b, &net->ifindex, GFP_KERNEL);
	else
		err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
	if (err < 0)
		return err;

	return ifindex;
}
static void dev_index_release(struct net *net, int ifindex)
{
	/* Expect only unused indexes, unlist_netdevice() removes the used */
	WARN_ON(xa_erase(&net->dev_by_index, ifindex));
}

/* Delayed registration/unregistration */
LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
atomic_t dev_unreg_count = ATOMIC_INIT(0);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}
static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			__netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
			else
				netdev_features_change(lower);
		}
	}
}
static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
{
	netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
	bool hw_csum = features & NETIF_F_HW_CSUM;

	return ip_csum || hw_csum;
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware.  If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
		netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
		features &= ~NETIF_F_LRO;
	}

	if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
		netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
		features &= ~NETIF_F_HW_TLS_TX;
	}

	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
		features &= ~NETIF_F_HW_TLS_RX;
	}

	if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
		netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
		features &= ~NETIF_F_GSO_UDP_L4;
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
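/* Hedged example of when a driver calls the two helpers above. foo_set_ring()
 * is a hypothetical driver callback, not something defined in this file; only
 * netdev_update_features()/netdev_change_features() come from here.
 *
 *	static int foo_set_ring(struct net_device *dev)
 *	{
 *		// ...reconfigure hardware, possibly losing some offloads...
 *		netdev_update_features(dev);	// re-evaluate, notify if changed
 *		return 0;
 *	}
 *
 * netdev_change_features() is the variant to use when vlan_features were also
 * touched and stacked devices must be forced to re-sync.
 */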
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (rootdev->operstate == IF_OPER_TESTING)
		netif_testing_on(dev);
	else
		netif_testing_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}
static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}
static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}
void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
{
	void __percpu *v;

	/* Drivers implementing ndo_get_peer_dev must support tstat
	 * accounting, so that skb_do_redirect() can bump the dev's
	 * RX stats upon network namespace switch.
	 */
	if (dev->netdev_ops->ndo_get_peer_dev &&
	    dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
		return -EOPNOTSUPP;

	switch (dev->pcpu_stat_type) {
	case NETDEV_PCPU_STAT_NONE:
		return 0;
	case NETDEV_PCPU_STAT_LSTATS:
		v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
		break;
	case NETDEV_PCPU_STAT_TSTATS:
		v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		break;
	case NETDEV_PCPU_STAT_DSTATS:
		v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
		break;
	default:
		return -EINVAL;
	}

	return v ? 0 : -ENOMEM;
}
static void netdev_do_free_pcpu_stats(struct net_device *dev)
{
	switch (dev->pcpu_stat_type) {
	case NETDEV_PCPU_STAT_NONE:
		return;
	case NETDEV_PCPU_STAT_LSTATS:
		free_percpu(dev->lstats);
		break;
	case NETDEV_PCPU_STAT_TSTATS:
		free_percpu(dev->tstats);
		break;
	case NETDEV_PCPU_STAT_DSTATS:
		free_percpu(dev->dstats);
		break;
	}
}
static void netdev_free_phy_link_topology(struct net_device *dev)
{
	struct phy_link_topology *topo = dev->link_topo;

	if (IS_ENABLED(CONFIG_PHYLIB) && topo) {
		xa_destroy(&topo->phys);
		kfree(topo);
		dev->link_topo = NULL;
	}
}
/**
 * register_netdevice() - register a network device
 * @dev: device to register
 *
 * Take a prepared network device structure and make it externally accessible.
 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
 * Callers must hold the rtnl lock - you may want register_netdev()
 * instead of this.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	ret = ethtool_check_ops(dev->ethtool_ops);
	if (ret)
		return ret;

	/* rss ctx ID 0 is reserved for the default context, start from 1 */
	xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
	mutex_init(&dev->ethtool->rss_lock);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	ret = -ENOMEM;
	dev->name_node = netdev_name_node_head_alloc(dev);
	if (!dev->name_node)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto err_free_name;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = netdev_do_alloc_pcpu_stats(dev);
	if (ret)
		goto err_uninit;

	ret = dev_index_reserve(net, dev->ifindex);
	if (ret < 0)
		goto err_free_pcpu;
	dev->ifindex = ret;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->udp_tunnel_nic_info) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_ifindex_release;

	ret = netdev_register_kobject(dev);

	WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);

	if (ret)
		goto err_uninit_notify;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);

	netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
	list_netdevice(dev);

	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* Expect explicit free_netdev() on failure */
		dev->needs_free_netdev = false;
		unregister_netdevice_queue(dev, NULL);
		goto out;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);

out:
	return ret;

err_uninit_notify:
	call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
err_ifindex_release:
	dev_index_release(net, dev->ifindex);
err_free_pcpu:
	netdev_do_free_pcpu_stats(dev);
err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
err_free_name:
	netdev_name_node_free(dev->name_node);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/* Initialize the core of a dummy net device.
 * This is useful if you are calling this function after alloc_netdev(),
 * since it does not memset the net_device fields.
 */
static void init_dummy_netdev_core(struct net_device *dev)
{
	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */
}

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
void init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));
	init_dummy_netdev_core(dev);
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
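/* Minimal probe-time sketch assuming an Ethernet driver; alloc_etherdev() and
 * the foo_* names are illustrative only, register_netdev()/free_netdev() are
 * the helpers defined in this file.
 *
 *	ndev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!ndev)
 *		return -ENOMEM;
 *	ndev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(ndev);
 *	if (err)
 *		free_netdev(ndev);
 */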
int netdev_refcnt_read(const struct net_device *dev)
{
#ifdef CONFIG_PCPU_DEV_REFCNT
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
#else
	return refcount_read(&dev->dev_refcnt);
#endif
}
EXPORT_SYMBOL(netdev_refcnt_read);
int netdev_unregister_timeout_secs __read_mostly = 10;

#define WAIT_REFS_MIN_MSECS 1
#define WAIT_REFS_MAX_MSECS 250
/**
 * netdev_wait_allrefs_any - wait until all references are gone.
 * @list: list of net_devices to wait on
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
{
	unsigned long rebroadcast_time, warning_time;
	struct net_device *dev;
	int wait = 0;

	rebroadcast_time = warning_time = jiffies;

	list_for_each_entry(dev, list, todo_list)
		if (netdev_refcnt_read(dev) == 1)
			return dev;

	while (true) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			list_for_each_entry(dev, list, todo_list)
				call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			list_for_each_entry(dev, list, todo_list)
				if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
					     &dev->state)) {
					/* We must not have linkwatch events
					 * pending on unregister. If this
					 * happens, we simply run the queue
					 * unscheduled, resulting in a noop
					 * for this device.
					 */
					linkwatch_run_queue();
					break;
				}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		rcu_barrier();

		if (!wait) {
			wait = WAIT_REFS_MIN_MSECS;
		} else {
			msleep(wait);
			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
		}

		list_for_each_entry(dev, list, todo_list)
			if (netdev_refcnt_read(dev) == 1)
				return dev;

		if (time_after(jiffies, warning_time +
			       READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
			list_for_each_entry(dev, list, todo_list) {
				pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
					 dev->name, netdev_refcnt_read(dev));
				ref_tracker_dir_print(&dev->refcnt_tracker, 10);
			}

			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct net_device *dev, *tmp;
	struct list_head list;
	int cnt;
#ifdef CONFIG_LOCKDEP
	struct list_head unlink_list;

	list_replace_init(&net_unlink_list, &unlink_list);

	while (!list_empty(&unlink_list)) {
		struct net_device *dev = list_first_entry(&unlink_list,
							  struct net_device,
							  unlink_list);
		list_del_init(&dev->unlink_list);
		dev->nested_level = dev->lower_level - 1;
	}
#endif

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	list_for_each_entry_safe(dev, tmp, &list, todo_list) {
		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			netdev_WARN(dev, "run_todo but not unregistering\n");
			list_del(&dev->todo_list);
			continue;
		}

		WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
		linkwatch_sync_dev(dev);
	}

	cnt = 0;
	while (!list_empty(&list)) {
		dev = netdev_wait_allrefs_any(&list);
		list_del(&dev->todo_list);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev) != 1);
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));

		netdev_do_free_pcpu_stats(dev);
		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		cnt++;

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
	if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
		wake_up(&netdev_unregistering_wq);
}
/* Collate per-cpu network dstats statistics
 *
 * Read per-cpu network statistics from dev->dstats and populate the related
 * fields in @s.
 */
static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
			     const struct pcpu_dstats __percpu *dstats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		u64 rx_packets, rx_bytes, rx_drops;
		u64 tx_packets, tx_bytes, tx_drops;
		const struct pcpu_dstats *stats;
		unsigned int start;

		stats = per_cpu_ptr(dstats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			rx_packets = u64_stats_read(&stats->rx_packets);
			rx_bytes = u64_stats_read(&stats->rx_bytes);
			rx_drops = u64_stats_read(&stats->rx_drops);
			tx_packets = u64_stats_read(&stats->tx_packets);
			tx_bytes = u64_stats_read(&stats->tx_bytes);
			tx_drops = u64_stats_read(&stats->tx_drops);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		s->rx_packets += rx_packets;
		s->rx_bytes += rx_bytes;
		s->rx_dropped += rx_drops;
		s->tx_packets += tx_packets;
		s->tx_bytes += tx_bytes;
		s->tx_dropped += tx_drops;
	}
}
/* ndo_get_stats64 implementation for dstats-based accounting.
 *
 * Populate @s from dev->stats and dev->dstats. This is used internally by the
 * core for NETDEV_PCPU_STAT_DSTATS-type stats collection.
 */
static void dev_get_dstats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	dev_fetch_dstats(s, dev->dstats);
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = (unsigned long)atomic_long_read(&src[i]);
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
		struct net_device *dev)
{
	struct net_device_core_stats __percpu *p;

	p = alloc_percpu_gfp(struct net_device_core_stats,
			     GFP_ATOMIC | __GFP_NOWARN);

	if (p && cmpxchg(&dev->core_stats, NULL, p))
		free_percpu(p);

	/* This READ_ONCE() pairs with the cmpxchg() above */
	return READ_ONCE(dev->core_stats);
}

noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
{
	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
	struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
	unsigned long __percpu *field;

	if (unlikely(!p)) {
		p = netdev_core_stats_alloc(dev);
		if (!p)
			return;
	}

	field = (unsigned long __percpu *)((void __percpu *)p + offset);
	this_cpu_inc(*field);
}
EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	const struct net_device_core_stats __percpu *p;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
		dev_get_tstats64(dev, storage);
	} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) {
		dev_get_dstats64(dev, storage);
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}

	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
	p = READ_ONCE(dev->core_stats);
	if (p) {
		const struct net_device_core_stats *core_stats;
		int i;

		for_each_possible_cpu(i) {
			core_stats = per_cpu_ptr(p, i);
			storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
			storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
			storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
			storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
		}
	}
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
/**
 *	dev_fetch_sw_netstats - get per-cpu network device statistics
 *	@s: place to store stats
 *	@netstats: per-cpu network stats to read from
 *
 *	Read per-cpu network statistics and populate the related fields in @s.
 */
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
			   const struct pcpu_sw_netstats __percpu *netstats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		const struct pcpu_sw_netstats *stats;
		unsigned int start;

		stats = per_cpu_ptr(netstats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			rx_packets = u64_stats_read(&stats->rx_packets);
			rx_bytes = u64_stats_read(&stats->rx_bytes);
			tx_packets = u64_stats_read(&stats->tx_packets);
			tx_bytes = u64_stats_read(&stats->tx_bytes);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		s->rx_packets += rx_packets;
		s->rx_bytes += rx_bytes;
		s->tx_packets += tx_packets;
		s->tx_bytes += tx_bytes;
	}
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
/**
 *	dev_get_tstats64 - ndo_get_stats64 implementation
 *	@dev: device to get statistics from
 *	@s: place to store stats
 *
 *	Populate @s from dev->stats and dev->tstats. Can be used as
 *	ndo_get_stats64() callback.
 */
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	dev_fetch_sw_netstats(s, dev->tstats);
}
EXPORT_SYMBOL_GPL(dev_get_tstats64);
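/* Hedged example: a driver that uses the core tstats plumbing either gets
 * dev_get_tstats64() semantics automatically through dev_get_stats() above,
 * or can point its ops at it explicitly. foo_netdev_ops is hypothetical.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_get_stats64	= dev_get_tstats64,
 *	};
 *	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 */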
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}
static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
/**
 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
 * @dev: netdev to enable the IRQ coalescing on
 *
 * Sets a conservative default for SW IRQ coalescing. Users can use
 * sysfs attributes to override the default values.
 */
void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
{
	WARN_ON(dev->reg_state == NETREG_REGISTERED);

	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
		dev->gro_flush_timeout = 20000;
		dev->napi_defer_hard_irqs = 1;
	}
}
EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
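/* Usage sketch (assumption): called from a driver's setup path before
 * registration, since the WARN_ON() above rejects already-registered devices.
 * The 20000 ns flush timeout amounts to 20 us of GRO batching plus one
 * deferred hard IRQ. alloc_etherdev() here is illustrative.
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	netdev_sw_irq_coalesce_default_on(dev);
 *	err = register_netdev(dev);
 */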
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
		       GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!dev)
		return NULL;

	dev->priv_len = sizeof_priv;

	ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
#ifdef CONFIG_PCPU_DEV_REFCNT
	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;
	__dev_hold(dev);
#else
	refcount_set(&dev->dev_refcnt, 1);
#endif

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
	dev->xdp_zc_max_segs = 1;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
	dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
	dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
	dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
	dev->tso_max_segs = TSO_MAX_SEGS;
	dev->upper_level = 1;
	dev->lower_level = 1;
#ifdef CONFIG_LOCKDEP
	dev->nested_level = 0;
	INIT_LIST_HEAD(&dev->unlink_list);
#endif

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
	INIT_LIST_HEAD(&dev->net_notifier_list);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif

	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
	dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
	if (!dev->ethtool)
		goto free_all;

	strscpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_netdev_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
#ifdef CONFIG_PCPU_DEV_REFCNT
	free_percpu(dev->pcpu_refcnt);
free_dev:
#endif
	kvfree(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
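/* Illustrative call (values are examples only): a multi-queue Ethernet-style
 * device with 8 TX and 8 RX queues; ether_setup() is the usual setup callback
 * for Ethernet link types, other link types pass their own, and foo_priv is
 * hypothetical.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_ENUM, ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */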
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released. If this
 *	is the last reference then it will be freed. Must be called in process
 *	context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();

	/* When called immediately after register_netdevice() failed the unwind
	 * handling may still be dismantling the device. Handle that case by
	 * deferring the free.
	 */
	if (dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();
		dev->needs_free_netdev = true;
		return;
	}

	kfree(dev->ethtool);
	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	ref_tracker_dir_exit(&dev->refcnt_tracker);
#ifdef CONFIG_PCPU_DEV_REFCNT
	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;
#endif
	free_percpu(dev->core_stats);
	dev->core_stats = NULL;
	free_percpu(dev->xdp_bulkq);
	dev->xdp_bulkq = NULL;

	netdev_free_phy_link_topology(dev);

	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED ||
	    dev->reg_state == NETREG_DUMMY) {
		kvfree(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	WRITE_ONCE(dev->reg_state, NETREG_RELEASED);

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 * alloc_netdev_dummy - Allocate and initialize a dummy net device.
 * @sizeof_priv: size of private data to allocate space for
 *
 * Return: the allocated net_device on success, NULL otherwise
 */
struct net_device *alloc_netdev_dummy(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
			    init_dummy_netdev_core);
}
EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
static void netdev_rss_contexts_free(struct net_device *dev)
{
	struct ethtool_rxfh_context *ctx;
	unsigned long context;

	mutex_lock(&dev->ethtool->rss_lock);
	xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
		struct ethtool_rxfh_param rxfh;

		rxfh.indir = ethtool_rxfh_context_indir(ctx);
		rxfh.key = ethtool_rxfh_context_key(ctx);
		rxfh.hfunc = ctx->hfunc;
		rxfh.input_xfrm = ctx->input_xfrm;
		rxfh.rss_context = context;
		rxfh.rss_delete = true;

		xa_erase(&dev->ethtool->rss_ctx, context);
		if (dev->ethtool_ops->create_rxfh_context)
			dev->ethtool_ops->remove_rxfh_context(dev, ctx,
							      context, NULL);
		else
			dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
		kfree(ctx);
	}
	xa_destroy(&dev->ethtool->rss_ctx);
	mutex_unlock(&dev->ethtool->rss_lock);
}
/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore.  You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		unregister_netdevice_many(&single);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);
	int cnt = 0;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	if (list_empty(head))
		return;

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);
		WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);
		dev_tcx_uninstall(dev);
		dev_xdp_uninstall(dev);
		bpf_dev_bound_netdev_unregister(dev);
		dev_dmabuf_uninstall(dev);

		netdev_offload_xstats_disable_all(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0,
						     portid, nlh);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		netdev_rss_contexts_free(dev);

		call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		mutex_destroy(&dev->ethtool->rss_lock);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		netdev_put(dev, &dev->dev_registered_tracker);
		net_set_todo(dev);
		cnt++;
	}
	atomic_add(cnt, &dev_unreg_count);

	list_del(head);
}
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack allocated list_head,
 *  we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	unregister_netdevice_many_notify(head, 0, NULL);
}
EXPORT_SYMBOL(unregister_netdevice_many);
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
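/* Teardown sketch (assumption, mirroring the register_netdev() example above):
 * unregister first, then release the memory once the core is done with it.
 *
 *	unregister_netdev(ndev);	// takes rtnl, sends NETDEV_UNREGISTER
 *	free_netdev(ndev);		// final put of the underlying device
 */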
/**
 *	__dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *	@new_ifindex: If not zero, specifies device index in the target
 *		      namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex)
{
	struct netdev_name_node *name_node;
	struct net *net_old = dev_net(dev);
	char new_name[IFNAMSIZ] = {};
	int err, new_nsid;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->netns_local)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing todo */
	err = 0;
	if (net_eq(net_old, net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (netdev_name_in_use(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
		if (err < 0)
			goto out;
	}
	/* Check that none of the altnames conflicts. */
	err = -EEXIST;
	netdev_for_each_altname(dev, name_node)
		if (netdev_name_in_use(net, name_node->name))
			goto out;

	/* Check that new_ifindex isn't used yet. */
	if (new_ifindex) {
		err = dev_index_reserve(net, new_ifindex);
		if (err < 0)
			goto out;
	} else {
		/* If there is an ifindex conflict assign a new one */
		err = dev_index_reserve(net, dev->ifindex);
		if (err == -EBUSY)
			err = dev_index_reserve(net, 0);
		if (err < 0)
			goto out;
		new_ifindex = err;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Move per-net netdevice notifiers that are following the netdevice */
	move_netdevice_notifiers_dev_net(dev, net);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	if (new_name[0]) {
		/* Rename the netdev to prepared name */
		write_seqlock_bh(&netdev_rename_lock);
		strscpy(dev->name, new_name, IFNAMSIZ);
		write_sequnlock_bh(&netdev_rename_lock);
	}

	/* Fixup kobjects */
	dev_set_uevent_suppress(&dev->dev, 1);
	err = device_rename(&dev->dev, dev->name);
	dev_set_uevent_suppress(&dev->dev, 0);
	WARN_ON(err);

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Adapt owner in case owning user namespace of target network
	 * namespace is different from the original one.
	 */
	err = netdev_change_owner(dev, net_old, net);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
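
/*
 * Illustrative sketch (assumed caller, not from this file): most in-kernel
 * users go through dev_change_net_namespace(), i.e. this function with
 * new_ifindex == 0, and must already hold rtnl:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 *
 * "target_net" is an assumed struct net pointer.  On a name collision in the
 * destination namespace the "eth%d" pattern is used to pick a fresh name; a
 * negative errno is returned on failure.
 */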
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state &= NAPIF_STATE_THREADED;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	if (!use_backlog_threads()) {
#ifdef CONFIG_RPS
		remsd = oldsd->rps_ipi_list;
		oldsd->rps_ipi_list = NULL;
#endif
		/* send out pending IPI's on offline CPU */
		net_rps_send_ipi(remsd);
	}

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		rps_input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		rps_input_queue_head_incr(oldsd);
	}

	return 0;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
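
/*
 * Illustrative sketch (hypothetical aggregating driver, not from this file):
 * a master device recomputing its feature set from its lower devices.
 * "master" is an assumed struct net_device pointer; rtnl must be held for
 * netdev_for_each_lower_dev().
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *	struct net_device *lower;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(master, lower, iter)
 *		features = netdev_increment_features(features,
 *						     lower->features,
 *						     master->vlan_features);
 *
 * Starting from NETIF_F_ALL_FOR_ALL lets the first lower device clear
 * anything it lacks, while later devices can only narrow the set further.
 */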
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * sizeof_field(struct napi_struct, gro_bitmask));

	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);

	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
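
/*
 * Illustrative sketch (assumed caller, not from this file): a watchdog-style
 * path can name the offending driver in a log line, e.g.
 *
 *	netdev_err(dev, "transmit queue timed out (driver %s)\n",
 *		   netdev_drivername(dev));
 *
 * The empty string is returned when there is no parent device or driver, so
 * the result can be printed unconditionally.
 */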
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
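
/*
 * Illustrative sketch (hypothetical driver code): these helpers prefix the
 * message with the bus, driver and interface name, so callers only supply
 * the format string, e.g.
 *
 *	netdev_info(dev, "link up, %u Mbps, %s duplex\n",
 *		    speed, full_duplex ? "full" : "half");
 *	netdev_warn(dev, "dropping oversized frame (len %u)\n", skb->len);
 *
 * "speed", "full_duplex" and "skb" are assumed local variables.
 */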
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	xa_destroy(&net->dev_by_index);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
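
/*
 * Illustrative sketch (hypothetical subsystem, not from this file): other
 * per-namespace users follow the same pattern, pairing an init that
 * allocates state with an exit that frees it, and registering the pair:
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 * "foo_net_init"/"foo_net_exit" are assumed callbacks with the same
 * signatures as netdev_init()/netdev_exit() above.
 */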
static void __net_exit default_device_exit_net(struct net *net)
{
	struct netdev_name_node *name_node, *tmp;
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	ASSERT_RTNL();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->netns_local)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (netdev_name_in_use(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");

		netdev_for_each_altname_safe(dev, name_node, tmp)
			if (netdev_name_in_use(&init_net, name_node->name))
				__netdev_name_node_alt_destroy(name_node);

		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		default_device_exit_net(net);
		cond_resched();
	}

	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit_batch = default_device_exit_batch,
};
static void __init net_dev_struct_check(void)
{
	/* TX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
#ifdef CONFIG_XPS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);

	/* TXRX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);

	/* RX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
#ifdef CONFIG_NETPOLL
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 104);
}
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
#define SYSTEM_PERCPU_PAGE_POOL_SIZE	((1 << 20) / PAGE_SIZE)

static int net_page_pool_create(int cpuid)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	struct page_pool_params page_pool_params = {
		.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
		.flags = PP_FLAG_SYSTEM_POOL,
		.nid = cpu_to_mem(cpuid),
	};
	struct page_pool *pp_ptr;

	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
	if (IS_ERR(pp_ptr))
		return -ENOMEM;

	per_cpu(system_page_pool, cpuid) = pp_ptr;
#endif
	return 0;
}
static int backlog_napi_should_run(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
}

static void run_backlog_napi(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);

	napi_threaded_poll_loop(&sd->backlog);
}

static void backlog_napi_setup(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	napi->thread = this_cpu_read(backlog_napi);
	set_bit(NAPI_STATE_THREADED, &napi->state);
}

static struct smp_hotplug_thread backlog_threads = {
	.store			= &backlog_napi,
	.thread_should_run	= backlog_napi_should_run,
	.thread_fn		= run_backlog_napi,
	.thread_comm		= "backlog_napi/%u",
	.setup			= backlog_napi_setup,
};
/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	net_dev_struct_check();

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
		spin_lock_init(&sd->defer_lock);

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		INIT_LIST_HEAD(&sd->backlog.poll_list);

		if (net_page_pool_create(i))
			goto out;
	}
	if (use_backlog_threads())
		smpboot_register_percpu_thread(&backlog_threads);

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the
	 * list of network devices. It is the first device that appears
	 * and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;

	/* avoid static key IPIs to isolated CPUs */
	if (housekeeping_enabled(HK_TYPE_MISC))
		net_enable_timestamp();
out:
	if (rc < 0) {
		for_each_possible_cpu(i) {
			struct page_pool *pp_ptr;

			pp_ptr = per_cpu(system_page_pool, i);
			if (!pp_ptr)
				continue;

			page_pool_destroy(pp_ptr);
			per_cpu(system_page_pool, i) = NULL;
		}
	}

	return rc;
}

subsys_initcall(net_dev_init);