/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

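/*
 * For reference, a use such as NETDEVICE_SHOW_RO(type, fmt_dec) below
 * expands (roughly) to:
 *
 *	static ssize_t format_type(const struct net_device *dev, char *buf)
 *	{
 *		return sprintf(buf, fmt_dec, dev->type);
 *	}
 *	static ssize_t type_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_type);
 *	}
 *	static DEVICE_ATTR_RO(type);
 *
 * DEVICE_ATTR_RO(type) defines dev_attr_type with mode 0444 wired to
 * type_show(); the attribute tables further down take its address.
 */
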
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

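/*
 * Illustrative write path (interface name is an example): writing "1500"
 * to /sys/class/net/eth0/mtu runs mtu_store() -> netdev_store() ->
 * change_mtu() -> dev_set_mtu(), with the CAP_NET_ADMIN check and RTNL
 * locking above applied once for every such writable attribute.
 */
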
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;
			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	dev->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int) new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = netdev_switch_parent_id_get(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_switch_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

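/*
 * For reference, NETSTAT_ENTRY(rx_packets) below expands (roughly) to an
 * rx_packets_show() that calls
 *	netstat_show(d, attr, buf,
 *		     offsetof(struct rtnl_link_stats64, rx_packets));
 * plus a read-only dev_attr_rx_packets.  Through netstat_group further
 * down, each entry surfaces as a file such as
 * /sys/class/net/<iface>/statistics/rx_packets.
 */
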
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs  = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}

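/*
 * Illustrative usage (interface and queue names are examples): steer
 * receive processing for rx queue 0 of eth0 onto CPUs 0-3 by writing a
 * hex CPU bitmap:
 *	echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 * bitmap_parse() above accepts the same hex-mask format that
 * show_rps_map() prints; writing 0 disables RPS for the queue.
 */
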
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
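		/*
		 * Worked example (illustrative): count = 100 starts with
		 * mask = 99 (0b1100011); the loop ORs in the shifted copy,
		 * giving 115, 123, then 127 (0b1111111), at which point
		 * (mask | (mask >> 1)) == mask and iteration stops.  The
		 * table is then sized for mask + 1 = 128 flows, i.e. count
		 * rounded up to the next power of two.
		 */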
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

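/*
 * Illustrative usage (names are examples): writing
 *	echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 * allocates a per-queue flow table of 4096 entries (counts are rounded
 * up to a power of two above); writing 0 drops the table.
 */
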
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error)
		goto exit;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto exit;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(&dev->_rx[i].kobj,
					   dev->sysfs_rx_queue_group);
		kobject_put(&dev->_rx[i].kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};

#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

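/*
 * Illustrative read (interface and queue names are examples):
 *	cat /sys/class/net/eth0/queues/tx-0/tx_timeout
 * prints queue->trans_timeout, which counts transmit watchdog timeouts
 * on this queue.
 */
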
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	       bql_set_ ## NAME)

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

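/*
 * For reference, BQL_ATTR(limit, limit) above expands (roughly) to
 * bql_show_limit()/bql_set_limit() operating on queue->dql.limit, plus
 * a bql_limit_attribute exposed as "limit" in the byte_queue_limits
 * group below.  bql_set() accepts a decimal byte count up to
 * DQL_MAX_LIMIT, or the string "max"; e.g. (names are examples):
 *	echo max > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit
 */
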
static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name  = "byte_queue_limits",
	.attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
1151 static struct attribute
*netdev_queue_default_attrs
[] = {
1152 &queue_trans_timeout
.attr
,
1154 &xps_cpus_attribute
.attr
,
1159 static void netdev_queue_release(struct kobject
*kobj
)
1161 struct netdev_queue
*queue
= to_netdev_queue(kobj
);
1163 memset(kobj
, 0, sizeof(*kobj
));
1164 dev_put(queue
->dev
);
static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
	int i;
	int error = 0;

#ifdef CONFIG_SYSFS
	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
	    NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}