/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"
/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;
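
/*
 * Rough sketch of the procfs layout created by this file (the exact set of
 * entries depends on the kernel configuration):
 *
 *   /proc/irq/default_smp_affinity       default affinity mask (rw)
 *   /proc/irq/<N>/smp_affinity           affinity as a hex cpumask (rw)
 *   /proc/irq/<N>/smp_affinity_list      affinity as a cpu list (rw)
 *   /proc/irq/<N>/affinity_hint          driver-provided affinity hint (ro)
 *   /proc/irq/<N>/node                   NUMA node of the interrupt (ro)
 *   /proc/irq/<N>/spurious               spurious-interrupt statistics (ro)
 *   /proc/irq/<N>/<action name>/         per-handler directory
 */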
static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        const struct cpumask *mask = desc->irq_data.affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        /* If an affinity change is pending, show the mask that will be applied. */
        if (irqd_is_setaffinity_pending(&desc->irq_data))
                mask = desc->pending_mask;
#endif
        /* type != 0: cpu list format, type == 0: hex cpumask format */
        if (type)
                seq_cpumask_list(m, mask);
        else
                seq_cpumask(m, mask);
        seq_putc(m, '\n');
        return 0;
}
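
/*
 * Note on the two output formats: seq_cpumask() prints the mask as a hex
 * bitmap (e.g. "f" for CPUs 0-3) and backs /proc/irq/<N>/smp_affinity,
 * while seq_cpumask_list() prints a human-readable range list (e.g. "0-3")
 * and backs /proc/irq/<N>/smp_affinity_list.
 */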
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        unsigned long flags;
        cpumask_var_t mask;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        /* Snapshot the hint under the descriptor lock. */
        raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->affinity_hint)
                cpumask_copy(mask, desc->affinity_hint);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        seq_cpumask(m, mask);
        seq_putc(m, '\n');
        free_cpumask_var(mask);

        return 0;
}
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(0, m, v);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(1, m, v);
}
static ssize_t write_irq_affinity(int type, struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
        cpumask_var_t new_value;
        int err;

        if (!irq_can_set_affinity(irq) || no_irq_affinity)
                return -EIO;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        if (type)
                err = cpumask_parselist_user(buffer, count, new_value);
        else
                err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto free_cpumask;

        if (!is_affinity_mask_valid(new_value)) {
                err = -EINVAL;
                goto free_cpumask;
        }

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                /* Special case for empty set - allow the architecture
                   code to set default SMP affinity. */
                err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
        } else {
                irq_set_affinity(irq, new_value);
                err = count;
        }

free_cpumask:
        free_cpumask_var(new_value);
        return err;
}
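
/*
 * Usage sketch (assuming a root shell and an existing interrupt N):
 *
 *   # echo 3 > /proc/irq/N/smp_affinity          restrict IRQ N to CPUs 0-1
 *   # echo 0-1 > /proc/irq/N/smp_affinity_list   same request, list syntax
 *
 * A write fails with -EIO when the interrupt cannot be rebalanced; a mask
 * that targets no online CPU is either handed to the architecture default
 * affinity code or rejected with -EINVAL, as implemented above.
 */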
static ssize_t irq_affinity_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(1, file, buffer, count, pos);
}
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}
static const struct file_operations irq_affinity_proc_fops = {
        .open           = irq_affinity_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
        .open           = irq_affinity_hint_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static const struct file_operations irq_affinity_list_proc_fops = {
        .open           = irq_affinity_list_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = irq_affinity_list_proc_write,
};
static int default_affinity_show(struct seq_file *m, void *v)
{
        seq_cpumask(m, irq_default_affinity);
        seq_putc(m, '\n');
        return 0;
}
static ssize_t default_affinity_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *ppos)
{
        cpumask_var_t new_value;
        int err;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto out;

        if (!is_affinity_mask_valid(new_value)) {
                err = -EINVAL;
                goto out;
        }

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                err = -EINVAL;
                goto out;
        }

        cpumask_copy(irq_default_affinity, new_value);
        err = count;

out:
        free_cpumask_var(new_value);
        return err;
}
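
/*
 * Note: a write here only updates irq_default_affinity, which the rest of
 * the genirq core consults when assigning an initial affinity to newly set
 * up interrupts; already-active IRQs keep their current affinity.
 */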
static int default_affinity_open(struct inode *inode, struct file *file)
{
        return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct file_operations default_affinity_proc_fops = {
        .open           = default_affinity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = default_affinity_write,
};
static int irq_node_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "%d\n", desc->irq_data.node);
        return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_node_proc_fops = {
        .open           = irq_node_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
                   desc->irq_count, desc->irqs_unhandled,
                   jiffies_to_msecs(desc->last_unhandled));
        return 0;
}
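
/*
 * Note: irq_count, irqs_unhandled and last_unhandled are maintained by the
 * genirq spurious-interrupt detection code; this handler only reports them.
 */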
static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_spurious_proc_fops = {
        .open           = irq_spurious_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;
        int ret = 1;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for (action = desc->action; action; action = action->next) {
                if ((action != new_action) && action->name &&
                                !strcmp(new_action->name, action->name)) {
                        ret = 0;
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
        char name[MAX_NAMELEN];
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc->dir || action->dir || !action->name ||
                                        !name_unique(irq, action))
                return;

        memset(name, 0, MAX_NAMELEN);
        snprintf(name, MAX_NAMELEN, "%s", action->name);

        /* create /proc/irq/1234/handler/ */
        action->dir = proc_mkdir(name, desc->dir);
}
#undef MAX_NAMELEN

#define MAX_NAMELEN 10
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        char name[MAX_NAMELEN];

        if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        desc->dir = proc_mkdir(name, root_irq_dir);
        if (!desc->dir)
                return;

        /* create /proc/irq/<irq>/smp_affinity */
        proc_create_data("smp_affinity", 0600, desc->dir,
                         &irq_affinity_proc_fops, (void *)(long)irq);

        /* create /proc/irq/<irq>/affinity_hint */
        proc_create_data("affinity_hint", 0400, desc->dir,
                         &irq_affinity_hint_proc_fops, (void *)(long)irq);

        /* create /proc/irq/<irq>/smp_affinity_list */
        proc_create_data("smp_affinity_list", 0600, desc->dir,
                         &irq_affinity_list_proc_fops, (void *)(long)irq);

        proc_create_data("node", 0444, desc->dir,
                         &irq_node_proc_fops, (void *)(long)irq);

        proc_create_data("spurious", 0444, desc->dir,
                         &irq_spurious_proc_fops, (void *)(long)irq);
}
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        char name[MAX_NAMELEN];

        if (!root_irq_dir || !desc->dir)
                return;

        remove_proc_entry("smp_affinity", desc->dir);
        remove_proc_entry("affinity_hint", desc->dir);
        remove_proc_entry("smp_affinity_list", desc->dir);
        remove_proc_entry("node", desc->dir);
        remove_proc_entry("spurious", desc->dir);

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%u", irq);
        remove_proc_entry(name, root_irq_dir);
}
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
        proc_remove(action->dir);
}
static void register_default_affinity_proc(void)
{
        proc_create("irq/default_smp_affinity", 0600, NULL,
                    &default_affinity_proc_fops);
}
void init_irq_proc(void)
{
        unsigned int irq;
        struct irq_desc *desc;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);
        if (!root_irq_dir)
                return;

        register_default_affinity_proc();

        /*
         * Create entries for all existing IRQs.
         */
        for_each_irq_desc(irq, desc) {
                if (!desc)
                        continue;

                register_irq_proc(irq, desc);
        }
}
#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
        return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
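
/*
 * Note: arch_show_interrupts() is a weak stub, so an architecture can
 * append its own summary lines (they are printed once i reaches
 * ACTUAL_NR_IRQS below), and it may likewise define ACTUAL_NR_IRQS when
 * the reported range should differ from nr_irqs.
 */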
int show_interrupts(struct seq_file *p, void *v)
{
        static int prec;

        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > ACTUAL_NR_IRQS)
                return 0;

        if (i == ACTUAL_NR_IRQS)
                return arch_show_interrupts(p, prec);

        /* print header and calculate the width of the first column */
        if (i == 0) {
                for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                        j *= 10;

                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        irq_lock_sparse();
        desc = irq_to_desc(i);
        if (!desc)
                goto outsparse;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

        if (desc->irq_data.chip) {
                if (desc->irq_data.chip->irq_print_chip)
                        desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
                else if (desc->irq_data.chip->name)
                        seq_printf(p, " %8s", desc->irq_data.chip->name);
                else
                        seq_printf(p, " %8s", "-");
        } else {
                seq_printf(p, " %8s", "None");
        }
        if (desc->irq_data.domain)
                seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
        seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
        if (desc->name)
                seq_printf(p, "-%-8s", desc->name);

        if (action) {
                seq_printf(p, " %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
        irq_unlock_sparse();
        return 0;
}
#endif
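
/*
 * Output sketch for show_interrupts() (column widths vary with nr_irqs and
 * the number of online CPUs); a typical line looks roughly like:
 *
 *            CPU0       CPU1
 *  30:      12345        678   some_chip   42  Level  some_device
 *
 * i.e. irq number, per-CPU counts, chip name, hwirq (when a domain is set),
 * trigger type (with CONFIG_GENERIC_IRQ_SHOW_LEVEL) and the action name(s).
 * "some_chip" and "some_device" are placeholder names.
 */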