/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <net/wext.h>

#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}
/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t show_##field(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}
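
/*
 * For example, NETDEVICE_SHOW(mtu, fmt_dec) expands to
 *
 *	static ssize_t format_mtu(const struct net_device *net, char *buf)
 *	{
 *		return sprintf(buf, fmt_dec, net->mtu);
 *	}
 *	static ssize_t show_mtu(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 *
 * so every field published this way is read under dev_base_lock via
 * netdev_show().
 */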
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *net = to_net_dev(dev);
	char *endp;
	unsigned long new;
	int ret = -EINVAL;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	new = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(net)) {
		if ((ret = (*set)(net, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);
/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static ssize_t show_broadcast(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}
static ssize_t show_carrier(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static ssize_t show_speed(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!dev_ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static ssize_t show_duplex(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!dev_ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, "%s\n",
				      cmd.duplex ? "full" : "half");
	}
	rtnl_unlock();
	return ret;
}
static ssize_t show_dormant(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};
static ssize_t show_operstate(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
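
/*
 * Example (assuming a hypothetical interface eth0): a write such as
 *
 *	# echo 1400 > /sys/class/net/eth0/mtu
 *
 * goes through netdev_store(), which parses "1400" and calls
 * change_mtu() -> dev_set_mtu() under the rtnl lock.
 */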
NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	size_t count = len;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}
static ssize_t show_ifalias(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
static struct device_attribute net_class_attributes[] = {
	__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
	__ATTR(type, S_IRUGO, show_type, NULL),
	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
	__ATTR(address, S_IRUGO, show_address, NULL),
	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
	__ATTR(carrier, S_IRUGO, show_carrier, NULL),
	__ATTR(speed, S_IRUGO, show_speed, NULL),
	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
	__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
	__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
	       store_tx_queue_len),
	__ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
	{}
};
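
/*
 * Each entry above appears as a file directly in the device's class
 * directory, e.g. /sys/class/net/<iface>/mtu and
 * /sys/class/net/<iface>/operstate.
 */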
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
			offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t show_##name(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
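
/*
 * For example, NETSTAT_ENTRY(rx_packets) generates show_rx_packets(),
 * which passes the byte offset of rx_packets inside struct
 * rtnl_link_stats64 to netstat_show(), and registers the read-only file
 * /sys/class/net/<iface>/statistics/rx_packets.
 */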
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs  = netstat_attrs,
};
#ifdef CONFIG_WIRELESS_EXT_SYSFS
/* helper function that does all the locking etc for wireless stats */
static ssize_t wireless_show(struct device *d, char *buf,
			     ssize_t (*format)(const struct iw_statistics *,
					       char *))
{
	struct net_device *dev = to_net_dev(d);
	const struct iw_statistics *iw;
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();
	if (dev_isalive(dev)) {
		iw = get_wireless_stats(dev);
		if (iw)
			ret = (*format)(iw, buf);
	}
	rtnl_unlock();

	return ret;
}
/* show function template for wireless fields */
#define WIRELESS_SHOW(name, field, format_string)			\
static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
{									\
	return sprintf(buf, format_string, iw->field);			\
}									\
static ssize_t show_iw_##name(struct device *d,				\
			      struct device_attribute *attr, char *buf)	\
{									\
	return wireless_show(d, buf, format_iw_##name);			\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
WIRELESS_SHOW(status, status, fmt_hex);
WIRELESS_SHOW(link, qual.qual, fmt_dec);
WIRELESS_SHOW(level, qual.level, fmt_dec);
WIRELESS_SHOW(noise, qual.noise, fmt_dec);
WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
WIRELESS_SHOW(crypt, discard.code, fmt_dec);
WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
WIRELESS_SHOW(misc, discard.misc, fmt_dec);
WIRELESS_SHOW(retries, discard.retries, fmt_dec);
WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
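
/*
 * These attributes are collected under /sys/class/net/<iface>/wireless/;
 * e.g. the "link" file prints iw->qual.qual with fmt_dec.
 */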
static struct attribute *wireless_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_link.attr,
	&dev_attr_level.attr,
	&dev_attr_noise.attr,
	&dev_attr_nwid.attr,
	&dev_attr_crypt.attr,
	&dev_attr_fragment.attr,
	&dev_attr_retries.attr,
	&dev_attr_misc.attr,
	&dev_attr_beacon.attr,
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
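
/*
 * Example (assuming a hypothetical interface eth0): steer receive
 * processing for queue 0 to CPUs 0-3 by writing a hex CPU bitmap:
 *
 *	# echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 */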
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned int val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%u\n", val);
}
static void rps_dev_flow_table_release_work(struct work_struct *work)
{
	struct rps_dev_flow_table *table = container_of(work,
	    struct rps_dev_flow_table, free_work);

	vfree(table);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
	schedule_work(&table->free_work);
}
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned int count;
	char *endp;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	count = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EINVAL;

	if (count) {
		int i;

		if (count > 1<<30) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
		count = roundup_pow_of_two(count);
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
		if (!table)
			return -ENOMEM;

		table->mask = count - 1;
		for (i = 0; i < count; i++)
			table->flows[i].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
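
/*
 * Example (assuming a hypothetical interface eth0): the count written is
 * rounded up to a power of two, so
 *
 *	# echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * installs a 4096-entry flow table for RFS on that queue.
 */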
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	    show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_raw(queue->rps_map);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_raw(queue->rps_flow_table);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};
static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_RPS */
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);

	return error;
#else
	return 0;
#endif
}
#ifdef CONFIG_XPS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	cpumask_var_t mask;
	int err, i, cpu, pos, map_len, alloc_len, need_set;
	unsigned long index;
	struct xps_map *map, *new_map;
	struct xps_dev_maps *dev_maps, *new_dev_maps;
	int nonempty = 0;
	int numa_node = -2;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	new_dev_maps = kzalloc(max_t(unsigned,
	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
	if (!new_dev_maps) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	for_each_possible_cpu(cpu) {
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
		new_map = map;
		if (map) {
			for (pos = 0; pos < map->len; pos++)
				if (map->queues[pos] == index)
					break;
			map_len = map->len;
			alloc_len = map->alloc_len;
		} else
			pos = map_len = alloc_len = 0;

		need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
		if (need_set) {
			if (numa_node == -2)
				numa_node = cpu_to_node(cpu);
			else if (numa_node != cpu_to_node(cpu))
				numa_node = -1;
		}
#endif
		if (need_set && pos >= map_len) {
			/* Need to add queue to this CPU's map */
			if (map_len >= alloc_len) {
				alloc_len = alloc_len ?
				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
						       GFP_KERNEL,
						       cpu_to_node(cpu));
				if (!new_map)
					goto error;
				new_map->alloc_len = alloc_len;
				for (i = 0; i < map_len; i++)
					new_map->queues[i] = map->queues[i];
				new_map->len = map_len;
			}
			new_map->queues[new_map->len++] = index;
		} else if (!need_set && pos < map_len) {
			/* Need to remove queue from this CPU's map */
			if (map_len > 1)
				new_map->queues[pos] =
				    new_map->queues[--new_map->len];
			else
				new_map = NULL;
		}
		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
	}

	/* Cleanup old maps */
	for_each_possible_cpu(cpu) {
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
			kfree_rcu(map, rcu);
		if (new_dev_maps->cpu_map[cpu])
			nonempty = 1;
	}

	if (nonempty)
		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
	else {
		kfree(new_dev_maps);
		rcu_assign_pointer(dev->xps_maps, NULL);
	}

	if (dev_maps)
		kfree_rcu(dev_maps, rcu);

	netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
					    NUMA_NO_NODE);

	mutex_unlock(&xps_map_mutex);

	free_cpumask_var(mask);
	return len;

error:
	mutex_unlock(&xps_map_mutex);

	if (new_dev_maps)
		for_each_possible_cpu(i)
			kfree(rcu_dereference_protected(
				new_dev_maps->cpu_map[i],
				1));
	kfree(new_dev_maps);
	free_cpumask_var(mask);
	return -ENOMEM;
}
static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
static struct attribute *netdev_queue_default_attrs[] = {
	&xps_cpus_attribute.attr,
	NULL
};
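
/*
 * Example (assuming a hypothetical interface eth0): bind transmit
 * queue 0 to CPU 0 by writing a hex CPU bitmap:
 *
 *	# echo 1 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 */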
static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	unsigned long index;
	int i, pos, nonempty = 0;

	index = get_netdev_queue_index(queue);

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (dev_maps) {
		for_each_possible_cpu(i) {
			map = xmap_dereference(dev_maps->cpu_map[i]);
			if (!map)
				continue;

			for (pos = 0; pos < map->len; pos++)
				if (map->queues[pos] == index)
					break;

			if (pos < map->len) {
				if (map->len > 1)
					map->queues[pos] =
					    map->queues[--map->len];
				else {
					RCU_INIT_POINTER(dev_maps->cpu_map[i],
					    NULL);
					kfree_rcu(map, rcu);
					map = NULL;
				}
			}
			if (map)
				nonempty = 1;
		}

		if (!nonempty) {
			RCU_INIT_POINTER(dev->xps_maps, NULL);
			kfree_rcu(dev_maps, rcu);
		}
	}

	mutex_unlock(&xps_map_mutex);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};
static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_XPS */
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_XPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_tx[i].kobj);

	return error;
#else
	return 0;
#endif
}
static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
	net->queues_kset = kset_create_and_add("queues",
	    NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
#endif

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}
static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
	kset_unregister(net->queues_kset);
#endif
}
static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}
struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively. */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
#endif
/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}
static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}
static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
#ifdef CONFIG_SYSFS
	.dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
	.dev_uevent = netdev_uevent,
#endif
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};
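
/*
 * Registering a device against this class is what makes it show up as
 * /sys/class/net/<iface>; the attributes, the statistics group and the
 * per-queue kobjects all hang off that directory.
 */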
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	device_del(dev);
}
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;
#ifdef CONFIG_WIRELESS_EXT_SYSFS
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#ifdef CONFIG_WIRELESS_EXT
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	return error;
}
int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);
void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);
int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}