/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
        AFFINITY,
        AFFINITY_LIST,
        EFFECTIVE,
        EFFECTIVE_LIST,
};

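/*
 * Show the affinity mask of one interrupt. The enum above selects both
 * the source of the mask (requested vs. effective affinity) and the
 * output format: "%*pb" prints a hex bitmask, "%*pbl" a CPU list such
 * as "0-3,8".
 */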
static int show_irq_affinity(int type, struct seq_file *m)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        const struct cpumask *mask;

        switch (type) {
        case AFFINITY:
        case AFFINITY_LIST:
                mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
                /* Report the mask a pending move will apply, not the stale one */
                if (irqd_is_setaffinity_pending(&desc->irq_data))
                        mask = desc->pending_mask;
#endif
                break;
        case EFFECTIVE:
        case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
                mask = desc->irq_common_data.effective_affinity;
                break;
#endif
        default:
                return -EINVAL;
        }

        switch (type) {
        case AFFINITY_LIST:
        case EFFECTIVE_LIST:
                seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
                break;
        case AFFINITY:
        case EFFECTIVE:
                seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
                break;
        }
        return 0;
}

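/*
 * /proc/irq/<irq>/affinity_hint: a preferred mask a driver can publish
 * (via irq_set_affinity_hint()) for user space IRQ balancers. It is
 * copied out under desc->lock so a concurrent update cannot be seen
 * half-written; an all-zero mask means no hint is set.
 */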
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        unsigned long flags;
        cpumask_var_t mask;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->affinity_hint)
                cpumask_copy(mask, desc->affinity_hint);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
        free_cpumask_var(mask);

        return 0;
}

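/*
 * Architectures that provide their own is_affinity_mask_valid() can
 * veto user supplied masks; the fallback below accepts anything.
 */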
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;

static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(AFFINITY_LIST, m);
}

static ssize_t write_irq_affinity(int type, struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
        cpumask_var_t new_value;
        int err;

        if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
                return -EIO;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        if (type)
                err = cpumask_parselist_user(buffer, count, new_value);
        else
                err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto free_cpumask;

        if (!is_affinity_mask_valid(new_value)) {
                err = -EINVAL;
                goto free_cpumask;
        }

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                /*
                 * Special case for empty set - allow the architecture code
                 * to set default SMP affinity.
                 */
                err = irq_select_affinity_usr(irq) ? -EINVAL : count;
        } else {
                irq_set_affinity(irq, new_value);
                err = count;
        }

free_cpumask:
        free_cpumask_var(new_value);
        return err;
}

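/*
 * Example usage from user space (IRQ number illustrative):
 *
 *   echo 4   > /proc/irq/42/smp_affinity       # bitmask: CPU 2 only
 *   echo 2-3 > /proc/irq/42/smp_affinity_list  # list format
 *
 * A mask without any online CPU is never applied verbatim; it either
 * triggers the architecture default selection or fails with -EINVAL.
 */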
static ssize_t irq_affinity_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_affinity_proc_fops = {
        .open           = irq_affinity_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
        .open           = irq_affinity_hint_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static const struct file_operations irq_affinity_list_proc_fops = {
        .open           = irq_affinity_list_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = irq_affinity_list_proc_write,
};

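/*
 * Note that irq_affinity_hint_proc_fops has no .write method: the hint
 * is owned by the driver and is read-only from user space.
 */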
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(EFFECTIVE_LIST, m);
}

static int irq_effective_aff_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_effective_aff_proc_show, PDE_DATA(inode));
}

static int irq_effective_aff_list_proc_open(struct inode *inode,
                                            struct file *file)
{
        return single_open(file, irq_effective_aff_list_proc_show,
                           PDE_DATA(inode));
}

static const struct file_operations irq_effective_aff_proc_fops = {
        .open           = irq_effective_aff_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static const struct file_operations irq_effective_aff_list_proc_fops = {
        .open           = irq_effective_aff_list_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif

static int default_affinity_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
        return 0;
}

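/*
 * Writes to /proc/irq/default_smp_affinity change the mask used when
 * setting up new IRQs from then on; IRQs that were already requested
 * keep their current affinity.
 */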
static ssize_t default_affinity_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *ppos)
{
        cpumask_var_t new_value;
        int err;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto out;

        if (!is_affinity_mask_valid(new_value)) {
                err = -EINVAL;
                goto out;
        }

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                err = -EINVAL;
                goto out;
        }

        cpumask_copy(irq_default_affinity, new_value);
        err = count;

out:
        free_cpumask_var(new_value);
        return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
        return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct file_operations default_affinity_proc_fops = {
        .open           = default_affinity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = default_affinity_write,
};

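/* /proc/irq/<irq>/node: the NUMA node this descriptor is bound to. */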
static int irq_node_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "%d\n", irq_desc_get_node(desc));
        return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_node_proc_fops = {
        .open           = irq_node_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif /* CONFIG_SMP */

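/*
 * /proc/irq/<irq>/spurious: counters maintained by the spurious
 * interrupt detector (see note_interrupt()): interrupts seen, how many
 * were unhandled, and the jiffies timestamp of the last unhandled one,
 * printed in milliseconds.
 */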
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
                   desc->irq_count, desc->irqs_unhandled,
                   jiffies_to_msecs(desc->last_unhandled));
        return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_spurious_proc_fops = {
        .open           = irq_spurious_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;
        int ret = 1;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_action_of_desc(desc, action) {
                if ((action != new_action) && action->name &&
                                !strcmp(new_action->name, action->name)) {
                        ret = 0;
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

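/*
 * register_handler_proc() creates one /proc/irq/<irq>/<name>/ directory
 * per action; name_unique() above keeps two actions of a shared IRQ
 * from asking proc_mkdir() for the same directory name.
 */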
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
        char name [MAX_NAMELEN];
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc->dir || action->dir || !action->name ||
                                        !name_unique(irq, action))
                return;

        snprintf(name, MAX_NAMELEN, "%s", action->name);

        /* create /proc/irq/1234/handler/ */
        action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        static DEFINE_MUTEX(register_lock);
        void __maybe_unused *irqp = (void *)(unsigned long) irq;
        char name [MAX_NAMELEN];

        if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
                return;

        /*
         * irq directories are registered only when a handler is
         * added, not when the descriptor is created, so multiple
         * tasks might try to register at the same time.
         */
        mutex_lock(&register_lock);

        if (desc->dir)
                goto out_unlock;

        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        desc->dir = proc_mkdir(name, root_irq_dir);
        if (!desc->dir)
                goto out_unlock;

#ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
        proc_create_data("smp_affinity", 0644, desc->dir,
                         &irq_affinity_proc_fops, irqp);

        /* create /proc/irq/<irq>/affinity_hint */
        proc_create_data("affinity_hint", 0444, desc->dir,
                         &irq_affinity_hint_proc_fops, irqp);

        /* create /proc/irq/<irq>/smp_affinity_list */
        proc_create_data("smp_affinity_list", 0644, desc->dir,
                         &irq_affinity_list_proc_fops, irqp);

        proc_create_data("node", 0444, desc->dir,
                         &irq_node_proc_fops, irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        proc_create_data("effective_affinity", 0444, desc->dir,
                         &irq_effective_aff_proc_fops, irqp);
        proc_create_data("effective_affinity_list", 0444, desc->dir,
                         &irq_effective_aff_list_proc_fops, irqp);
# endif
#endif
        proc_create_data("spurious", 0444, desc->dir,
                         &irq_spurious_proc_fops, (void *)(long)irq);

out_unlock:
        mutex_unlock(&register_lock);
}

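/*
 * Resulting layout (IRQ number illustrative; which entries exist
 * depends on CONFIG_SMP and CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK):
 *
 *   /proc/irq/42/smp_affinity
 *   /proc/irq/42/smp_affinity_list
 *   /proc/irq/42/affinity_hint
 *   /proc/irq/42/node
 *   /proc/irq/42/effective_affinity
 *   /proc/irq/42/effective_affinity_list
 *   /proc/irq/42/spurious
 */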
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        char name [MAX_NAMELEN];

        if (!root_irq_dir || !desc->dir)
                return;
#ifdef CONFIG_SMP
        remove_proc_entry("smp_affinity", desc->dir);
        remove_proc_entry("affinity_hint", desc->dir);
        remove_proc_entry("smp_affinity_list", desc->dir);
        remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        remove_proc_entry("effective_affinity", desc->dir);
        remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
        remove_proc_entry("spurious", desc->dir);

        sprintf(name, "%u", irq);
        remove_proc_entry(name, root_irq_dir);
}

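/*
 * unregister_irq_proc() runs before the descriptor is deleted from the
 * radix tree and freed - see the access rules at the top of this file.
 */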
#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
        proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
        proc_create("irq/default_smp_affinity", 0644, NULL,
                    &default_affinity_proc_fops);
#endif
}

void init_irq_proc(void)
{
        unsigned int irq;
        struct irq_desc *desc;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);
        if (!root_irq_dir)
                return;

        register_default_affinity_proc();

        /*
         * Create entries for all existing IRQs.
         */
        for_each_irq_desc(irq, desc)
                register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
        return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

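/*
 * arch_show_interrupts() above is a weak stub that architectures
 * override to append their own rows (e.g. NMI and LOC counts on x86)
 * after the per-IRQ lines; ACTUAL_NR_IRQS likewise defaults to nr_irqs
 * unless the architecture overrides it.
 */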
int show_interrupts(struct seq_file *p, void *v)
{
        static int prec;

        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > ACTUAL_NR_IRQS)
                return 0;

        if (i == ACTUAL_NR_IRQS)
                return arch_show_interrupts(p, prec);

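        /*
         * One record per IRQ, plus one extra record at the end for the
         * architecture specific rows; the header below is printed only
         * when the first record (i == 0) is produced.
         */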
        /* print header and calculate the width of the first column */
        if (i == 0) {
                for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                        j *= 10;

                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        irq_lock_sparse();
        desc = irq_to_desc(i);
        if (!desc)
                goto outsparse;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
        /* Skip rows for IRQs that never fired and have no real handler */
        if ((!action || irq_desc_is_chained(desc)) && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

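        /*
         * Chip column: let the chip print itself if it has a method,
         * else fall back to its name, "-" if it is anonymous, or
         * "None" if the descriptor has no chip at all.
         */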
        if (desc->irq_data.chip) {
                if (desc->irq_data.chip->irq_print_chip)
                        desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
                else if (desc->irq_data.chip->name)
                        seq_printf(p, " %8s", desc->irq_data.chip->name);
                else
                        seq_printf(p, " %8s", "-");
        } else {
                seq_printf(p, " %8s", "None");
        }

        if (desc->irq_data.domain)
                seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
        else
                seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
        seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
        if (desc->name)
                seq_printf(p, "-%-8s", desc->name);

        if (action) {
                seq_printf(p, " %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
        irq_unlock_sparse();
        return 0;
}
#endif