/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctls */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = (*format)(ndev, buf);
        read_unlock(&dev_base_lock);

        return ret;
}
/* generate a show function for a simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *dev, char *buf)  \
{                                                                       \
        return sprintf(buf, format_string, dev->field);                 \
}                                                                       \
static ssize_t field##_show(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}

#define NETDEVICE_SHOW_RO(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RW(field)
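/*
 * For illustration: NETDEVICE_SHOW_RO(ifindex, fmt_dec) expands to roughly
 *
 *      static ssize_t format_ifindex(const struct net_device *dev, char *buf)
 *      {
 *              return sprintf(buf, fmt_dec, dev->ifindex);
 *      }
 *      static ssize_t ifindex_show(struct device *dev,
 *                                  struct device_attribute *attr, char *buf)
 *      {
 *              return netdev_show(dev, attr, buf, format_ifindex);
 *      }
 *      static DEVICE_ATTR_RO(ifindex);
 *
 * i.e. one read-only sysfs attribute backed by the named net_device field,
 * visible as /sys/class/net/<iface>/ifindex.
 */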
/* use same locking and permission rules as SIF* ioctls */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        unsigned long new;
        int ret = -EINVAL;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                if ((ret = (*set)(netdev, new)) == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}
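/*
 * Writes funnel through netdev_store() above: the value is parsed with
 * kstrtoul() and handed to a per-attribute setter under RTNL, so e.g.
 *
 *      echo 1500 > /sys/class/net/eth0/mtu
 *
 * ends up calling dev_set_mtu() (via change_mtu() below); "eth0" here is
 * just an example interface name.
 */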
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct net_device *ndev = to_net_dev(dev);

        return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
        return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (ndev->name_assign_type != NET_NAME_UNKNOWN)
                ret = netdev_show(dev, attr, buf, format_name_assign_type);

        return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
/* use same locking rules as GIFHWADDR ioctl */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
        read_unlock(&dev_base_lock);

        return ret;
}
static DEVICE_ATTR_RO(address);
static ssize_t broadcast_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = to_net_dev(dev);

        if (dev_isalive(ndev))
                return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
        return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);
static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
        if (!netif_running(dev))
                return -EINVAL;
        return dev_change_carrier(dev, (bool) new_carrier);
}
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_carrier);
}
static ssize_t carrier_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
static ssize_t speed_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_link_ksettings cmd;

                if (!__ethtool_get_link_ksettings(netdev, &cmd))
                        ret = sprintf(buf, fmt_dec, cmd.base.speed);
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(speed);
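/*
 * The rtnl_trylock()/restart_syscall() pattern used here (and in several
 * of the stores above) avoids sleeping on the RTNL mutex from a sysfs
 * handler: if the lock is contended, the call returns -ERESTARTNOINTR
 * and the syscall is transparently restarted, rather than blocking while
 * holding sysfs references.
 */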
static ssize_t duplex_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_link_ksettings cmd;

                if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
                        const char *duplex;

                        switch (cmd.base.duplex) {
                        case DUPLEX_HALF:
                                duplex = "half";
                                break;
                        case DUPLEX_FULL:
                                duplex = "full";
                                break;
                        default:
                                duplex = "unknown";
                                break;
                        }

                        ret = sprintf(buf, "%s\n", duplex);
                }
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(duplex);
static ssize_t dormant_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);
static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};
static ssize_t operstate_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
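/*
 * The operstates[] strings are indexed by the RFC 2863 IF_OPER_* values
 * from <uapi/linux/if.h> (IF_OPER_UNKNOWN == 0 ... IF_OPER_UP == 6), so
 * the array order above must match that enum.
 */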
static ssize_t carrier_changes_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        return sprintf(buf, fmt_dec,
                       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

/* read-write attributes */
static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
        return dev_set_mtu(dev, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);
static int change_flags(struct net_device *dev, unsigned long new_flags)
{
        return dev_change_flags(dev, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);
static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
        int res, orig_len = dev->tx_queue_len;

        if (new_len != orig_len) {
                dev->tx_queue_len = new_len;
                res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
                res = notifier_to_errno(res);
                if (res) {
                        netdev_err(dev,
                                   "refused to change device tx_queue_len\n");
                        dev->tx_queue_len = orig_len;
                        return res;
                }
        }

        return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
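/*
 * Note the rollback pattern in change_tx_queue_len() above: the new
 * length is installed first, NETDEV_CHANGE_TX_QUEUE_LEN notifiers are
 * given a chance to veto it, and the old value is restored if any
 * notifier returns an error.
 */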
static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
        dev->gro_flush_timeout = val;
        return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        size_t count = len;
        ssize_t ret;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RW(ifalias);
static int change_group(struct net_device *dev, unsigned long new_group)
{
        dev_set_group(dev, (int) new_group);
        return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
        return dev_change_proto_down(dev, (bool) proto_down);
}

static ssize_t proto_down_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct netdev_phys_item_id ppid;

                ret = dev_get_phys_port_id(netdev, &ppid);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_id);
static ssize_t phys_port_name_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                char name[IFNAMSIZ];

                ret = dev_get_phys_port_name(netdev, name, sizeof(name));
                if (!ret)
                        ret = sprintf(buf, "%s\n", name);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_name);
static ssize_t phys_switch_id_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct switchdev_attr attr = {
                        .orig_dev = netdev,
                        .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
                        .flags = SWITCHDEV_F_NO_RECURSE,
                };

                ret = switchdev_port_attr_get(netdev, &attr);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
                                      attr.u.ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);
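/*
 * "%*phN" is the kernel's hex-dump printf extension: it prints the given
 * number of bytes (the '*' width argument, here the id_len) as plain hex
 * digits with no separators, which is how the port/switch IDs above
 * appear in sysfs.
 */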
static struct attribute *net_class_attrs[] = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
        &dev_attr_dev_id.attr,
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
        &dev_attr_name_assign_type.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
        &dev_attr_address.attr,
        &dev_attr_broadcast.attr,
        &dev_attr_speed.attr,
        &dev_attr_duplex.attr,
        &dev_attr_dormant.attr,
        &dev_attr_operstate.attr,
        &dev_attr_carrier_changes.attr,
        &dev_attr_ifalias.attr,
        &dev_attr_carrier.attr,
        &dev_attr_mtu.attr,
        &dev_attr_flags.attr,
        &dev_attr_tx_queue_len.attr,
        &dev_attr_gro_flush_timeout.attr,
        &dev_attr_phys_port_id.attr,
        &dev_attr_phys_port_name.attr,
        &dev_attr_phys_switch_id.attr,
        &dev_attr_proto_down.attr,
        NULL,
};
ATTRIBUTE_GROUPS(net_class);
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
        }
        read_unlock(&dev_base_lock);

        return ret;
}
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t name##_show(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR_RO(name)
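/*
 * Each NETSTAT_ENTRY(name) invocation below creates a read-only file
 * under /sys/class/net/<iface>/statistics/ that reads the matching
 * rtnl_link_stats64 counter through its byte offset; e.g.
 * NETSTAT_ENTRY(rx_packets) backs statistics/rx_packets.
 */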
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);
static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        &dev_attr_rx_nohandler.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
};
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups        NULL
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,             \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}
static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        int i, len;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        rcu_read_unlock();
        free_cpumask_var(mask);

        return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                             struct rx_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_MUTEX(rps_map_mutex);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
                            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
                      GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i) {
                map->len = i;
        } else {
                kfree(map);
                map = NULL;
        }

        mutex_lock(&rps_map_mutex);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            mutex_is_locked(&rps_map_mutex));
        rcu_assign_pointer(queue->rps_map, map);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map)
                static_key_slow_dec(&rps_needed);

        mutex_unlock(&rps_map_mutex);

        if (old_map)
                kfree_rcu(old_map, rcu);

        free_cpumask_var(mask);
        return len;
}
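/*
 * This is the classic RCU publish pattern: the new rps_map is built off
 * to the side, swapped in with rcu_assign_pointer() under the mutex, and
 * the old map is freed only after a grace period (kfree_rcu). From
 * userspace the map is a CPU bitmask, e.g.
 *
 *      echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * steers receive processing for that queue to CPUs 0-3 ("eth0" is just
 * an example name).
 */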
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}
static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);

        vfree(table);
}
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                            struct rx_queue_attribute *attr,
                                            const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64 bit arches, must check mask fits in table->mask (u32),
                 * and on 32bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else {
                table = NULL;
        }

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}
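/*
 * The table size is rounded up to a power of two so flow hashes can be
 * masked rather than reduced modulo the size. A typical RFS setup (per
 * the scaling documentation) looks something like
 *
 *      echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *      echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * again with "eth0" as a placeholder interface.
 */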
static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
               show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */
static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
#endif
        NULL
};
static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}
static const void *rx_queue_namespace(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}
static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
        .namespace = rx_queue_namespace
};
static int rx_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_rx_queue *queue = dev->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
                                     "rx-%u", index);
        if (error)
                goto exit;

        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error)
                        goto exit;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

#ifndef CONFIG_RPS
        if (!dev->sysfs_rx_queue_group)
                return 0;
#endif
        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(&dev->_rx[i].kobj,
                                           dev->sysfs_rx_queue_group);
                kobject_put(&dev->_rx[i].kobj);
        }

        return error;
#else
        return 0;
#endif
}
#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,         \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}
static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};
static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        return sprintf(buf, "%lu", trans_timeout);
}
#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        unsigned int i;

        i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);

        return i;
}
static ssize_t show_tx_maxrate(struct netdev_queue *queue,
                               struct netdev_queue_attribute *attribute,
                               char *buf)
{
        return sprintf(buf, "%lu\n", queue->tx_maxrate);
}
static ssize_t set_tx_maxrate(struct netdev_queue *queue,
                              struct netdev_queue_attribute *attribute,
                              const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        int err, index = get_netdev_queue_index(queue);
        u32 rate = 0;

        err = kstrtou32(buf, 10, &rate);
        if (err < 0)
                return err;

        if (!rtnl_trylock())
                return restart_syscall();

        err = -EOPNOTSUPP;
        if (dev->netdev_ops->ndo_set_tx_maxrate)
                err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

        rtnl_unlock();
        if (!err) {
                queue->tx_maxrate = rate;
                return len;
        }
        return err;
}
static struct netdev_queue_attribute queue_tx_maxrate =
        __ATTR(tx_maxrate, S_IRUGO | S_IWUSR,
               show_tx_maxrate, set_tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
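/*
 * tx_maxrate is a per-queue rate limit handed to the driver via
 * ndo_set_tx_maxrate; per Documentation/ABI/testing/sysfs-class-net-queues
 * the value is in Mbps, so e.g.
 *
 *      echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * caps that queue at roughly 1 Gb/s on drivers that support it ("eth0"
 * being a placeholder).
 */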
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
                value = DQL_MAX_LIMIT;
        } else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
               bql_set_hold_time);
static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =      \
        __ATTR(NAME, S_IRUGO | S_IWUSR,                                 \
               bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);
static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name  = "byte_queue_limits",
        .attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */
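/*
 * These files appear under
 * /sys/class/net/<iface>/queues/tx-<n>/byte_queue_limits/. "inflight" is
 * simply num_queued - num_completed, i.e. bytes handed to the device but
 * not yet reported completed; limit_min/limit_max clamp the limit that
 * dynamic queue limits (dql) computes.
 */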
#ifdef CONFIG_XPS
static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        int i, len;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;
                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        free_cpumask_var(mask);
        return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_xps_map(struct netdev_queue *queue,
                             struct netdev_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        unsigned long index;
        cpumask_var_t mask;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        err = netif_set_xps_queue(dev, mask, index);

        free_cpumask_var(mask);

        return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
        __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */
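/*
 * XPS is the transmit-side mirror of RPS: rps_cpus selects which CPUs
 * process packets received on an rx queue, while xps_cpus selects which
 * CPUs may transmit on a tx queue, e.g.
 *
 *      echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * binds tx-0 to CPUs 0 and 1 (the interface name is illustrative).
 */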
static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
        &queue_tx_maxrate.attr,
#endif
        NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}
static const void *netdev_queue_namespace(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}
static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
        .namespace = netdev_queue_namespace,
};
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_queue *queue = dev->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
                                     "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
        int i;
        int error = 0;

#ifdef CONFIG_SYSFS
        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *dev)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        dev->queues_kset = kset_create_and_add("queues",
                                               NULL, &dev->dev.kobj);
        if (!dev->queues_kset)
                return -ENOMEM;
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(dev, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(dev, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(dev, txq, 0);
        net_rx_queue_update_kobjects(dev, rxq, 0);
        return error;
}
static void remove_queue_kobjects(struct net_device *dev)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        net_rx_queue_update_kobjects(dev, real_rx, 0);
        netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(dev->queues_kset);
#endif
}
static bool net_current_may_mount(void)
{
        struct net *net = current->nsproxy->net_ns;

        return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}
static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}
static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}
struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .current_may_mount = net_current_may_mount,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what rtnetlink uses natively.
         */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}
/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when the last reference to the device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        netdev_freemem(dev);
}
static const void *net_namespace(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
        .dev_groups = net_class_groups,
        .dev_uevent = netdev_uevent,
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};
#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
        int ret = 0;

        if (dev->parent)
                ret = dev->parent->of_node == data;

        return ret == 0 ? dev->of_node == data : ret;
}
/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding to the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
        struct device *dev;

        dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
        if (!dev)
                return NULL;

        return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(ndev);

        pm_runtime_set_memalloc_noio(dev, false);

        device_del(dev);
}
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);
        const struct attribute_group **groups = ndev->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = ndev;
        dev->groups = groups;

        dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (ndev->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (ndev->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(ndev);
        if (error) {
                device_del(dev);
                return error;
        }

        pm_runtime_set_memalloc_noio(dev, true);

        return error;
}
int netdev_class_create_file_ns(struct class_attribute *class_attr,
                                const void *ns)
{
        return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
                                 const void *ns)
{
        class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);
int __init netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}