/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H

#include <linux/types.h>
#include <linux/rwsem.h>
#include <linux/netdevice.h>

struct netlink_ext_ack;
/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
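/*
 * Per-CPU (softnet_data) flow-limit state: skb_flow_limit() hashes incoming
 * flows into @history/@buckets and flags packets from flows that dominate
 * the backlog once it starts filling, so they can be dropped first.
 */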
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};
extern int		netdev_flow_limit_table_len;
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);
/* sysctls not referred to from outside net/core/ */
extern int		netdev_unregister_timeout_secs;
extern int		weight_p;
extern int		dev_weight_rx_bias;
extern int		dev_weight_tx_bias;
extern struct rw_semaphore dev_addr_sem;
/* rtnl_lock must be held */
extern struct list_head net_todo_list;
void netdev_run_todo(void);
/* netdev management, shared between various uAPI entry points */
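/*
 * One netdev_name_node hangs off dev->name_node for the primary device
 * name; additional nodes on its list represent altnames and are walked by
 * the netdev_for_each_altname*() helpers below.
 */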
struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
	struct rcu_head rcu;
};
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_change_name(struct net_device *dev, const char *newname);
#define netdev_for_each_altname(dev, namenode)				\
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next)		\
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
		    struct netlink_ext_ack *extack);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value);
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void dev_set_group(struct net_device *dev, int new_group);
int dev_change_carrier(struct net_device *dev, bool new_carrier);
void __dev_set_rx_mode(struct net_device *dev);
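/*
 * __dev_notify_flags() and unregister_netdevice_many_notify() carry the
 * requesting socket's portid and the originating nlmsghdr so that the
 * rtnetlink notifications they generate can be echoed back to the requester.
 */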
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh);
void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh);
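/*
 * The GSO/GRO limit setters below use WRITE_ONCE() because the fields are
 * read locklessly (see the comments referencing sk_setup_caps() and
 * skb_gro_receive()); the non-IPv4 size setters additionally keep the IPv4
 * limit in sync while the requested size still fits the legacy 64K maximum.
 */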
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}
static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
int rps_cpumask_housekeeping(struct cpumask *mask);
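/*
 * Debug-only check that a NAPI poll cycle did not end with XDP redirects
 * still pending a flush; compiled out unless both CONFIG_DEBUG_NET and
 * CONFIG_BPF_SYSCALL are enabled.
 */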
#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
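/*
 * napi_by_id() resolves a napi_id back to its napi_struct;
 * kick_defer_list_purge() pokes the backlog of @cpu so that its deferred
 * skb free list gets flushed.
 */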
struct napi_struct *napi_by_id(unsigned int napi_id);
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT	8
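/*
 * Transmit-recursion accounting: dev_xmit_recursion() detects xmit paths
 * (e.g. stacked or tunnel devices) that re-enter the transmit code too
 * deeply. Without PREEMPT_RT the counter lives in per-CPU softnet_data,
 * since the path runs with bottom halves disabled; with PREEMPT_RT the
 * path is preemptible and may migrate between CPUs, so the counter is
 * tracked per task instead.
 */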
#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}
static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}
static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
static inline bool dev_xmit_recursion(void)
{
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}
static inline void dev_xmit_recursion_inc(void)
{
	current->net_xmit.recursion++;
}
static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif
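/*
 * Applies a hardware timestamping configuration, selecting between the
 * PHY (phylib) timestamping implementation and the MAC driver's
 * ndo_hwtstamp_set() depending on which one is in charge of timestamping.
 */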
int dev_set_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);

#endif