/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}
/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t show_##field(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
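
/* Each NETDEVICE_SHOW() below creates a read-only sysfs file named after
 * the field, e.g. /sys/class/net/eth0/ifindex; the read-write attributes
 * further down pair such a show function with a store helper, so that
 * "echo 1500 > /sys/class/net/eth0/mtu" ends up in dev_set_mtu().
 */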
NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);
/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static ssize_t show_broadcast(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);

	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}
static int change_carrier(struct net_device *net, unsigned long new_carrier)
{
	if (!netif_running(net))
		return -EINVAL;
	return dev_change_carrier(net, (bool) new_carrier);
}

static ssize_t store_carrier(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}
static ssize_t show_carrier(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static ssize_t show_speed(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static ssize_t show_duplex(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;
			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static ssize_t show_dormant(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};
static ssize_t show_operstate(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}
static ssize_t show_ifalias(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
static struct device_attribute net_class_attributes[] = {
	__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
	__ATTR(type, S_IRUGO, show_type, NULL),
	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
	__ATTR(address, S_IRUGO, show_address, NULL),
	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
	__ATTR(carrier, S_IRUGO | S_IWUSR, show_carrier, store_carrier),
	__ATTR(speed, S_IRUGO, show_speed, NULL),
	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
	__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
	__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
	       store_tx_queue_len),
	__ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
	{}
};
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
			offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t show_##name(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
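
/* Each NETSTAT_ENTRY() below becomes a read-only file in the device's
 * "statistics" attribute group (see netstat_group further down), e.g.
 * /sys/class/net/eth0/statistics/rx_packets.
 */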
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};
static struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs  = netstat_attrs,
};
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}
static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}
static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
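
/* rps_cpus is read and written as a hex CPU bitmask, in the same format
 * as /proc/irq/<n>/smp_affinity; e.g. "echo f > rps_cpus" steers receive
 * protocol processing for this queue onto CPUs 0-3.
 */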
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}
static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	vfree(table);
}
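
/* rps_flow_cnt accepts any count and rounds it up to a power of two,
 * since the flow table is indexed by hash & table->mask; writing e.g.
 * 100 yields a 128-entry table with mask 127, and writing 0 removes
 * the table.
 */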
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
		 * doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	    show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};
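
/* Called when the queue's kobject refcount hits zero: any remaining RPS
 * state is detached here and freed only after an RCU grace period, since
 * softirq paths may still be walking the old map or flow table.
 */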
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};
static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_RPS */
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);

	return error;
#else
	return 0;
#endif
}
#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}
static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}
static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};
static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}
static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}
static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	    bql_set_hold_time);
static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	    bql_set_ ## NAME)

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)
static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};
static struct attribute_group dql_group = {
	.name  = "byte_queue_limits",
	.attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */
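
/* Example: "echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus" lets only
 * CPUs 0 and 1 pick TX queue 0 for locally generated traffic.
 */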
static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};
static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	net->queues_kset = kset_create_and_add("queues",
	    NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
#endif

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}
static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(net->queues_kset);
#endif
}
static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}
= {
1181 .type
= KOBJ_NS_TYPE_NET
,
1182 .grab_current_ns
= net_grab_current_ns
,
1183 .netlink_ns
= net_netlink_ns
,
1184 .initial_ns
= net_initial_ns
,
1185 .drop_ns
= net_drop_ns
,
1187 EXPORT_SYMBOL_GPL(net_ns_type_operations
);
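
/* netdev_uevent fills in the environment for the uevents generated on
 * device add/remove/rename, so userspace rules (e.g. udev) can match on
 * INTERFACE or, more robustly, on the stable IFINDEX.
 */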
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}
static const void *net_namespace(struct device *d)
{
	struct net_device *dev;

	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}
static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
#ifdef CONFIG_SYSFS
	.dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}
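
/* The kobject_get() above is balanced by the put_device() in free_netdev()
 * once the last netdev reference is dropped, which in turn ends up in
 * netdev_release() to free the device itself.
 */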
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}
int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}