/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = (*format)(ndev, buf);
        read_unlock(&dev_base_lock);

        return ret;
}
/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *dev, char *buf) \
{                                                                       \
        return sprintf(buf, format_string, dev->field);                 \
}                                                                       \
static ssize_t field##_show(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}                                                                       \

#define NETDEVICE_SHOW_RO(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RW(field)
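/*
 * For example, NETDEVICE_SHOW_RO(dev_id, fmt_hex) below expands to roughly:
 *
 *      static ssize_t format_dev_id(const struct net_device *dev, char *buf)
 *      {
 *              return sprintf(buf, "%#x\n", dev->dev_id);
 *      }
 *      static ssize_t dev_id_show(struct device *dev,
 *                                 struct device_attribute *attr, char *buf)
 *      {
 *              return netdev_show(dev, attr, buf, format_dev_id);
 *      }
 *      static DEVICE_ATTR_RO(dev_id);
 *
 * which sysfs exposes as a read-only /sys/class/net/<iface>/dev_id file.
 */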
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        unsigned long new;
        int ret = -EINVAL;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                if ((ret = (*set)(netdev, new)) == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
        return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (ndev->name_assign_type != NET_NAME_UNKNOWN)
                ret = netdev_show(dev, attr, buf, format_name_assign_type);

        return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
}
static DEVICE_ATTR_RO(address);
static ssize_t broadcast_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        if (dev_isalive(ndev))
                return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
        return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);
static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
        if (!netif_running(dev))
                return -EINVAL;
        return dev_change_carrier(dev, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        if (netif_running(netdev)) {
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
        }
        return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
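/*
 * e.g. "echo 0 > /sys/class/net/<iface>/carrier" asks the driver to drop
 * carrier on a running interface (only drivers implementing
 * ndo_change_carrier honour this; others return -EOPNOTSUPP), while reading
 * the file reports 1 or 0 for link up/down.
 */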
static ssize_t speed_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(speed);
static ssize_t duplex_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd)) {
                        const char *duplex;
                        switch (cmd.duplex) {
                        case DUPLEX_HALF:
                                duplex = "half";
                                break;
                        case DUPLEX_FULL:
                                duplex = "full";
                                break;
                        default:
                                duplex = "unknown";
                                break;
                        }
                        ret = sprintf(buf, "%s\n", duplex);
                }
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(duplex);
static ssize_t dormant_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);
static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};
static ssize_t operstate_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
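/*
 * The strings above mirror the RFC 2863 operational states carried in the
 * IF_OPER_* values; e.g. "cat /sys/class/net/<iface>/operstate" typically
 * prints "up" or "down".
 */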
static ssize_t carrier_changes_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        return sprintf(buf, fmt_dec,
                       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);
/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
        return dev_set_mtu(dev, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
        return dev_change_flags(dev, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
        dev->tx_queue_len = new_len;
        return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        size_t count = len;
        ssize_t ret;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RW(ifalias);
static int change_group(struct net_device *dev, unsigned long new_group)
{
        dev_set_group(dev, (int) new_group);
        return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
/* The sysfs file is named "netdev_group" although the field is "group",
 * hence the explicit DEVICE_ATTR() instead of NETDEVICE_SHOW_RW().
 */
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct netdev_phys_port_id ppid;

                ret = dev_get_phys_port_id(netdev, &ppid);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_id);
static struct attribute *net_class_attrs[] = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
        &dev_attr_dev_id.attr,
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
        &dev_attr_name_assign_type.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
        &dev_attr_address.attr,
        &dev_attr_broadcast.attr,
        &dev_attr_speed.attr,
        &dev_attr_duplex.attr,
        &dev_attr_dormant.attr,
        &dev_attr_operstate.attr,
        &dev_attr_carrier_changes.attr,
        &dev_attr_ifalias.attr,
        &dev_attr_carrier.attr,
        &dev_attr_mtu.attr,
        &dev_attr_flags.attr,
        &dev_attr_tx_queue_len.attr,
        &dev_attr_phys_port_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(net_class);
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
        }
        read_unlock(&dev_base_lock);
        return ret;
}
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t name##_show(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR_RO(name)
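/*
 * For example, NETSTAT_ENTRY(rx_packets) generates rx_packets_show(), which
 * prints the u64 counter located at
 * offsetof(struct rtnl_link_stats64, rx_packets) in the snapshot returned
 * by dev_get_stats().
 */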
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name = "statistics",
        .attrs = netstat_attrs,
};
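/*
 * The group appears as /sys/class/net/<iface>/statistics/, so e.g.
 * "cat /sys/class/net/<iface>/statistics/rx_bytes" reads the current
 * receive byte counter.
 */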
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups        NULL
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,             \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                rcu_read_unlock();
                free_cpumask_var(mask);
                return -EINVAL;
        }
        rcu_read_unlock();

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                             struct rx_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
            GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i)
                map->len = i;
        else {
                kfree(map);
                map = NULL;
        }

        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map) {
                kfree_rcu(old_map, rcu);
                static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);
        return len;
}
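/*
 * The map is written as a hex CPU bitmap, e.g.
 * "echo f > /sys/class/net/<iface>/queues/rx-0/rps_cpus" steers RPS
 * processing for receive queue 0 onto CPUs 0-3.
 */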
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);
        vfree(table);
}
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                            struct rx_queue_attribute *attr,
                                            const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64 bit arches, must check mask fits in table->mask (u32),
                 * and on 32bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else
                table = NULL;

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}
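/*
 * The requested count is rounded up to a power of two, so e.g. writing 5000
 * to /sys/class/net/<iface>/queues/rx-0/rps_flow_cnt allocates a table of
 * 8192 flow entries.
 */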
static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
            show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
#endif
        NULL
};
static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}
static const void *rx_queue_namespace(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
        .namespace = rx_queue_namespace
};
static int rx_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_rx_queue *queue = dev->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error)
                goto exit;

        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error)
                        goto exit;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

#ifndef CONFIG_RPS
        if (!dev->sysfs_rx_queue_group)
                return 0;
#endif
        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(&dev->_rx[i].kobj,
                                           dev->sysfs_rx_queue_group);
                kobject_put(&dev->_rx[i].kobj);
        }

        return error;
#else
        return 0;
#endif
}
#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,         \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};
static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
                value = DQL_MAX_LIMIT;
        else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}
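/*
 * e.g. "echo max > /sys/class/net/<iface>/queues/tx-0/byte_queue_limits/limit_max"
 * lifts the bound to DQL_MAX_LIMIT, while a plain decimal number sets an
 * explicit byte limit.
 */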
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
            bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
        __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
            bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)
static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name = "byte_queue_limits",
        .attrs = dql_attrs,
};
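/*
 * These files appear under
 * /sys/class/net/<iface>/queues/tx-<n>/byte_queue_limits/ for every
 * transmit queue when CONFIG_BQL is enabled.
 */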
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        unsigned int i;

        i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);

        return i;
}
static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;
                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                free_cpumask_var(mask);
                return -EINVAL;
        }

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}
static ssize_t store_xps_map(struct netdev_queue *queue,
                             struct netdev_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        unsigned long index;
        cpumask_var_t mask;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        err = netif_set_xps_queue(dev, mask, index);

        free_cpumask_var(mask);

        return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
        __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
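/*
 * e.g. "echo 3 > /sys/class/net/<iface>/queues/tx-0/xps_cpus" makes the
 * stack prefer transmit queue 0 for packets generated on CPUs 0 and 1.
 */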
#endif /* CONFIG_XPS */
static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
#endif
        NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
        .namespace = netdev_queue_namespace,
};
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_queue *queue = dev->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *dev)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        dev->queues_kset = kset_create_and_add("queues",
            NULL, &dev->dev.kobj);
        if (!dev->queues_kset)
                return -ENOMEM;
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(dev, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(dev, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(dev, txq, 0);
        net_rx_queue_update_kobjects(dev, rxq, 0);
        return error;
}
static void remove_queue_kobjects(struct net_device *dev)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        net_rx_queue_update_kobjects(dev, real_rx, 0);
        netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(dev->queues_kset);
#endif
}
static bool net_current_may_mount(void)
{
        struct net *net = current->nsproxy->net_ns;

        return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .current_may_mount = net_current_may_mount,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
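/*
 * These operations let sysfs tag each network device's directory with its
 * network namespace, so a process sees only the devices of its own
 * namespace under /sys/class/net/.
 */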
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what RtNetlink uses natively. */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}
/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        netdev_freemem(dev);
}
static const void *net_namespace(struct device *d)
{
        struct net_device *dev;
        dev = container_of(d, struct net_device, dev);
        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
        .dev_groups = net_class_groups,
        .dev_uevent = netdev_uevent,
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(ndev);

        pm_runtime_set_memalloc_noio(dev, false);

        device_del(dev);
}
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);
        const struct attribute_group **groups = ndev->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = ndev;
        dev->groups = groups;

        dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (ndev->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (ndev->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(ndev);
        if (error) {
                device_del(dev);
                return error;
        }

        pm_runtime_set_memalloc_noio(dev, true);

        return error;
}
int netdev_class_create_file_ns(struct class_attribute *class_attr,
                                const void *ns)
{
        return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
                                 const void *ns)
{
        class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);
int __init netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}