/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of_net.h>

#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = (*format)(ndev, buf);
        read_unlock(&dev_base_lock);

        return ret;
}
/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *dev, char *buf)  \
{                                                                       \
        return sprintf(buf, format_string, dev->field);                 \
}                                                                       \
static ssize_t field##_show(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}

#define NETDEVICE_SHOW_RO(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RW(field)
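
/*
 * Illustrative note (not part of the original source): for a field such as
 * "mtu", NETDEVICE_SHOW_RW(mtu, fmt_dec) roughly expands to
 *
 *      static ssize_t format_mtu(const struct net_device *dev, char *buf)
 *      {
 *              return sprintf(buf, "%d\n", dev->mtu);
 *      }
 *      static ssize_t mtu_show(struct device *dev,
 *                              struct device_attribute *attr, char *buf)
 *      {
 *              return netdev_show(dev, attr, buf, format_mtu);
 *      }
 *      static DEVICE_ATTR_RW(mtu);
 *
 * i.e. a dev_attr_mtu attribute backing /sys/class/net/<iface>/mtu.
 */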

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        unsigned long new;
        int ret = -EINVAL;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                ret = (*set)(netdev, new);
                if (ret == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct net_device *ndev = to_net_dev(dev);

        return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
        return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (ndev->name_assign_type != NET_NAME_UNKNOWN)
                ret = netdev_show(dev, attr, buf, format_name_assign_type);

        return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
        read_unlock(&dev_base_lock);

        return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = to_net_dev(dev);

        if (dev_isalive(ndev))
                return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);

        return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
        if (!netif_running(dev))
                return -EINVAL;

        return dev_change_carrier(dev, (bool)new_carrier);
}
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
static ssize_t speed_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_link_ksettings cmd;

                if (!__ethtool_get_link_ksettings(netdev, &cmd))
                        ret = sprintf(buf, fmt_dec, cmd.base.speed);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_link_ksettings cmd;

                if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
                        const char *duplex;

                        switch (cmd.base.duplex) {
                        case DUPLEX_HALF:
                                duplex = "half";
                                break;
                        case DUPLEX_FULL:
                                duplex = "full";
                                break;
                        default:
                                duplex = "unknown";
                                break;
                        }

                        ret = sprintf(buf, "%s\n", duplex);
                }
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(duplex);
static ssize_t dormant_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};
static ssize_t operstate_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
static ssize_t carrier_changes_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        return sprintf(buf, fmt_dec,
                       atomic_read(&netdev->carrier_up_count) +
                       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);
/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
        return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
        return dev_change_flags(dev, (unsigned int)new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);
static ssize_t tx_queue_len_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
        dev->gro_flush_timeout = val;
        return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        size_t count = len;
        ssize_t ret = 0;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                ret = dev_set_alias(netdev, buf, count);
                if (ret < 0)
                        goto err;
                ret = len;
                netdev_state_change(netdev);
        }
err:
        rtnl_unlock();

        return ret;
}

static ssize_t ifalias_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        char tmp[IFALIASZ];
        ssize_t ret = 0;

        ret = dev_get_alias(netdev, tmp, sizeof(tmp));
        if (ret > 0)
                ret = sprintf(buf, "%s\n", tmp);

        return ret;
}
static DEVICE_ATTR_RW(ifalias);
static int change_group(struct net_device *dev, unsigned long new_group)
{
        dev_set_group(dev, (int)new_group);
        return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
        return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct netdev_phys_item_id ppid;

                ret = dev_get_phys_port_id(netdev, &ppid);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                char name[IFNAMSIZ];

                ret = dev_get_phys_port_name(netdev, name, sizeof(name));
                if (!ret)
                        ret = sprintf(buf, "%s\n", name);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct switchdev_attr attr = {
                        .orig_dev = netdev,
                        .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
                        .flags = SWITCHDEV_F_NO_RECURSE,
                };

                ret = switchdev_port_attr_get(netdev, &attr);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
                                      attr.u.ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);
static struct attribute *net_class_attrs[] __ro_after_init = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
        &dev_attr_dev_id.attr,
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
        &dev_attr_name_assign_type.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
        &dev_attr_address.attr,
        &dev_attr_broadcast.attr,
        &dev_attr_speed.attr,
        &dev_attr_duplex.attr,
        &dev_attr_dormant.attr,
        &dev_attr_operstate.attr,
        &dev_attr_carrier_changes.attr,
        &dev_attr_ifalias.attr,
        &dev_attr_carrier.attr,
        &dev_attr_mtu.attr,
        &dev_attr_flags.attr,
        &dev_attr_tx_queue_len.attr,
        &dev_attr_gro_flush_timeout.attr,
        &dev_attr_phys_port_id.attr,
        &dev_attr_phys_port_name.attr,
        &dev_attr_phys_switch_id.attr,
        &dev_attr_proto_down.attr,
        &dev_attr_carrier_up_count.attr,
        &dev_attr_carrier_down_count.attr,
        NULL,
};
ATTRIBUTE_GROUPS(net_class);
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
        }
        read_unlock(&dev_base_lock);

        return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t name##_show(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR_RO(name)
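
/*
 * Illustrative note (not part of the original source): each NETSTAT_ENTRY()
 * below generates a read-only attribute whose show method calls
 * netstat_show() with the matching offset in struct rtnl_link_stats64, so
 * e.g. NETSTAT_ENTRY(rx_packets) ends up visible as
 * /sys/class/net/<iface>/statistics/rx_packets once netstat_group is
 * attached to the class device.
 */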

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);
static struct attribute *netstat_attrs[] __ro_after_init = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        &dev_attr_rx_nohandler.attr,
        NULL
};

static const struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static const struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups        NULL
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
        container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        int i, len;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        rcu_read_unlock();
        free_cpumask_var(mask);

        return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                             const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_MUTEX(rps_map_mutex);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
                            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
                      GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i) {
                map->len = i;
        } else {
                kfree(map);
                map = NULL;
        }

        mutex_lock(&rps_map_mutex);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            mutex_is_locked(&rps_map_mutex));
        rcu_assign_pointer(queue->rps_map, map);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map)
                static_key_slow_dec(&rps_needed);

        mutex_unlock(&rps_map_mutex);

        if (old_map)
                kfree_rcu(old_map, rcu);

        free_cpumask_var(mask);
        return len;
}
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);

        vfree(table);
}
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                            const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64 bit arches, must check mask fits in table->mask (u32),
                 * and on 32bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else {
                table = NULL;
        }

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}
static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
        = __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
        = __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
                 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
#endif
        NULL
};
static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}
static struct kobj_type rx_queue_ktype __ro_after_init = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
        .namespace = rx_queue_namespace,
};
static int rx_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_rx_queue *queue = dev->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
                                     "rx-%u", index);
        if (error)
                return error;

        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error) {
                        kobject_put(kobj);
                        return error;
                }
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
}
#endif /* CONFIG_SYSFS */
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

#ifndef CONFIG_RPS
        if (!dev->sysfs_rx_queue_group)
                return 0;
#endif
        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct kobject *kobj = &dev->_rx[i].kobj;

                if (!refcount_read(&dev_net(dev)->count))
                        kobj->uevent_suppress = 1;
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
                kobject_put(kobj);
        }

        return error;
#else
        return 0;
#endif
}
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
                         const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
        container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        const struct netdev_queue_attribute *attribute
                = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        const struct netdev_queue_attribute *attribute
                = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};
static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        return sprintf(buf, "%lu", trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        unsigned int i;

        i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);

        return i;
}
static ssize_t traffic_class_show(struct netdev_queue *queue,
                                  char *buf)
{
        struct net_device *dev = queue->dev;
        int index = get_netdev_queue_index(queue);
        int tc = netdev_txq_to_tc(dev, index);

        if (tc < 0)
                return -EINVAL;

        return sprintf(buf, "%u\n", tc);
}
#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
                               char *buf)
{
        return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
                                const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        int err, index = get_netdev_queue_index(queue);
        u32 rate = 0;

        err = kstrtou32(buf, 10, &rate);
        if (err < 0)
                return err;

        if (!rtnl_trylock())
                return restart_syscall();

        err = -EOPNOTSUPP;
        if (dev->netdev_ops->ndo_set_tx_maxrate)
                err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

        rtnl_unlock();
        if (!err) {
                queue->tx_maxrate = rate;
                return len;
        }
        return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
        = __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
        = __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
        = __ATTR_RO(traffic_class);
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
                value = DQL_MAX_LIMIT;
        } else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
        = __ATTR(hold_time, S_IRUGO | S_IWUSR,
                 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD)                                            \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 char *buf)                              \
{                                                                        \
        return bql_show(buf, queue->dql.FIELD);                          \
}                                                                        \
                                                                         \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                const char *buf, size_t len)             \
{                                                                        \
        return bql_set(buf, len, &queue->dql.FIELD);                     \
}                                                                        \
                                                                         \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
        = __ATTR(NAME, S_IRUGO | S_IWUSR,                                \
                 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);
static struct attribute *dql_attrs[] __ro_after_init = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static const struct attribute_group dql_group = {
        .name  = "byte_queue_limits",
        .attrs = dql_attrs,
};
#endif /* CONFIG_BQL */
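
/*
 * Illustrative note (not part of the original source): with CONFIG_BQL
 * enabled, dql_group is created per TX queue by netdev_queue_add_kobject()
 * below, so the limits are tunable at e.g.
 * /sys/class/net/<iface>/queues/tx-0/byte_queue_limits/{limit,limit_max,
 * limit_min,hold_time,inflight}.
 */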

#ifdef CONFIG_XPS
static ssize_t xps_cpus_show(struct netdev_queue *queue,
                             char *buf)
{
        struct net_device *dev = queue->dev;
        int cpu, len, num_tc = 1, tc = 0;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        if (dev->num_tc) {
                num_tc = dev->num_tc;
                tc = netdev_txq_to_tc(dev, index);
                if (tc < 0)
                        return -EINVAL;
        }

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(cpu) {
                        int i, tci = cpu * num_tc + tc;
                        struct xps_map *map;

                        map = rcu_dereference(dev_maps->cpu_map[tci]);
                        if (!map)
                                continue;

                        for (i = map->len; i--;) {
                                if (map->queues[i] == index) {
                                        cpumask_set_cpu(cpu, mask);
                                        break;
                                }
                        }
                }
        }
        rcu_read_unlock();

        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        free_cpumask_var(mask);
        return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t xps_cpus_store(struct netdev_queue *queue,
                              const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        unsigned long index;
        cpumask_var_t mask;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        err = netif_set_xps_queue(dev, mask, index);

        free_cpumask_var(mask);

        return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
        = __ATTR_RW(xps_cpus);
#endif /* CONFIG_XPS */
static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
        &queue_trans_timeout.attr,
        &queue_traffic_class.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
        &queue_tx_maxrate.attr,
#endif
        NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type netdev_queue_ktype __ro_after_init = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
        .namespace = netdev_queue_namespace,
};
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_queue *queue = dev->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
                                     "tx-%u", index);
        if (error)
                return error;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error) {
                kobject_put(kobj);
                return error;
        }
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
}
#endif /* CONFIG_SYSFS */
int netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;

                if (!refcount_read(&dev_net(dev)->count))
                        queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *dev)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        dev->queues_kset = kset_create_and_add("queues",
                                               NULL, &dev->dev.kobj);
        if (!dev->queues_kset)
                return -ENOMEM;
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(dev, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(dev, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(dev, txq, 0);
        net_rx_queue_update_kobjects(dev, rxq, 0);
        return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        net_rx_queue_update_kobjects(dev, real_rx, 0);
        netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(dev->queues_kset);
#endif
}
static bool net_current_may_mount(void)
{
        struct net *net = current->nsproxy->net_ns;

        return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                refcount_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}
const struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .current_may_mount = net_current_may_mount,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what RtNetlink uses natively.
         */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}

/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        /* no need to wait for rcu grace period:
         * device is dead and about to be freed.
         */
        kfree(rcu_access_pointer(dev->ifalias));
        netdev_freemem(dev);
}
static const void *net_namespace(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        return dev_net(dev);
}

static struct class net_class __ro_after_init = {
        .name = "net",
        .dev_release = netdev_release,
        .dev_groups = net_class_groups,
        .dev_uevent = netdev_uevent,
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};
#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
        int ret = 0;

        if (dev->parent)
                ret = dev->parent->of_node == data;

        return ret == 0 ? dev->of_node == data : ret;
}

/**
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
        struct device *dev;

        dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
        if (!dev)
                return NULL;

        return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
        struct device *dev = &ndev->dev;

        if (!refcount_read(&dev_net(ndev)->count))
                dev_set_uevent_suppress(dev, 1);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(ndev);

        pm_runtime_set_memalloc_noio(dev, false);

        device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
        struct device *dev = &ndev->dev;
        const struct attribute_group **groups = ndev->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = ndev;
        dev->groups = groups;

        dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (ndev->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (ndev->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(ndev);
        if (error) {
                device_del(dev);
                return error;
        }

        pm_runtime_set_memalloc_noio(dev, true);

        return error;
}
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
                                const void *ns)
{
        return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
                                 const void *ns)
{
        class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);
int __init netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}