/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP
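
/*
 * Show the affinity mask of an interrupt. If an affinity change is still
 * pending (CONFIG_GENERIC_PENDING_IRQ), the pending mask is reported
 * instead. 'type' selects the output format: 0 prints a bitmask, 1 prints
 * a CPU list.
 */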
static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        const struct cpumask *mask = desc->irq_data.affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (irqd_is_setaffinity_pending(&desc->irq_data))
                mask = desc->pending_mask;
#endif
        if (type)
                seq_cpumask_list(m, mask);
        else
                seq_cpumask(m, mask);
        seq_putc(m, '\n');
        return 0;
}
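
/*
 * Show desc->affinity_hint for an interrupt. The buffer is zero-filled
 * first, so an empty mask is printed when no hint has been set.
 */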
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        unsigned long flags;
        cpumask_var_t mask;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->affinity_hint)
                cpumask_copy(mask, desc->affinity_hint);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        seq_cpumask(m, mask);
        seq_putc(m, '\n');
        free_cpumask_var(mask);

        return 0;
}

#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(0, m, v);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(1, m, v);
}
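
/*
 * Parse a cpumask written to /proc/irq/<irq>/smp_affinity (type == 0,
 * bitmask format) or smp_affinity_list (type == 1, CPU list format) and
 * apply it with irq_set_affinity(). A mask that contains no online CPU
 * is only accepted as a request for the architecture default affinity.
 */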
static ssize_t write_irq_affinity(int type, struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
        cpumask_var_t new_value;
        int err;

        if (!irq_can_set_affinity(irq) || no_irq_affinity)
                return -EIO;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        if (type)
                err = cpumask_parselist_user(buffer, count, new_value);
        else
                err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto free_cpumask;

        if (!is_affinity_mask_valid(new_value)) {
                err = -EINVAL;
                goto free_cpumask;
        }

        /*
         * Do not allow disabling IRQs completely - it's too easy a
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                /* Special case for empty set - allow the architecture
                   code to set default SMP affinity. */
                err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
        } else {
                irq_set_affinity(irq, new_value);
                err = count;
        }

free_cpumask:
        free_cpumask_var(new_value);
        return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_list_proc_show, PDE(inode)->data);
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_affinity_proc_fops = {
        .open           = irq_affinity_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
        .open           = irq_affinity_hint_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static const struct file_operations irq_affinity_list_proc_fops = {
        .open           = irq_affinity_list_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = irq_affinity_list_proc_write,
};

static int default_affinity_show(struct seq_file *m, void *v)
{
        seq_cpumask(m, irq_default_affinity);
        seq_putc(m, '\n');
        return 0;
}
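
/*
 * Update irq_default_affinity from /proc/irq/default_smp_affinity.
 * The new mask must intersect the online CPU mask.
 */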
static ssize_t default_affinity_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *ppos)
{
        cpumask_var_t new_value;
        int err;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto out;

        if (!is_affinity_mask_valid(new_value)) {
                err = -EINVAL;
                goto out;
        }

        /*
         * Do not allow disabling IRQs completely - it's too easy a
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                err = -EINVAL;
                goto out;
        }

        cpumask_copy(irq_default_affinity, new_value);
        err = count;

out:
        free_cpumask_var(new_value);
        return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
        return single_open(file, default_affinity_show, PDE(inode)->data);
}

static const struct file_operations default_affinity_proc_fops = {
        .open           = default_affinity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "%d\n", desc->irq_data.node);
        return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_node_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_node_proc_fops = {
        .open           = irq_node_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif
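
/*
 * /proc/irq/<irq>/spurious: report the interrupt count, the number of
 * unhandled interrupts and the jiffies timestamp of the last unhandled
 * one converted to milliseconds.
 */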
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
                   desc->irq_count, desc->irqs_unhandled,
                   jiffies_to_msecs(desc->last_unhandled));
        return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_spurious_proc_fops = {
        .open           = irq_spurious_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

#define MAX_NAMELEN 128
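
/*
 * Return 1 if no other action on this interrupt uses the same name as
 * new_action, so that every action gets its own /proc/irq/<irq>/ entry.
 */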
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;
        int ret = 1;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for (action = desc->action ; action; action = action->next) {
                if ((action != new_action) && action->name &&
                                !strcmp(new_action->name, action->name)) {
                        ret = 0;
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
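
/* Create a /proc/irq/<irq>/<name>/ directory for a newly installed action. */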
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
        char name [MAX_NAMELEN];
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc->dir || action->dir || !action->name ||
                                        !name_unique(irq, action))
                return;

        memset(name, 0, MAX_NAMELEN);
        snprintf(name, MAX_NAMELEN, "%s", action->name);

        /* create /proc/irq/1234/handler/ */
        action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10
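
/*
 * Create the /proc/irq/<irq>/ directory and populate it with the
 * spurious entry and, on SMP kernels, the smp_affinity, affinity_hint,
 * smp_affinity_list and node entries.
 */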
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        char name [MAX_NAMELEN];

        if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        desc->dir = proc_mkdir(name, root_irq_dir);
        if (!desc->dir)
                return;

#ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
        proc_create_data("smp_affinity", 0600, desc->dir,
                         &irq_affinity_proc_fops, (void *)(long)irq);

        /* create /proc/irq/<irq>/affinity_hint */
        proc_create_data("affinity_hint", 0400, desc->dir,
                         &irq_affinity_hint_proc_fops, (void *)(long)irq);

        /* create /proc/irq/<irq>/smp_affinity_list */
        proc_create_data("smp_affinity_list", 0600, desc->dir,
                         &irq_affinity_list_proc_fops, (void *)(long)irq);

        proc_create_data("node", 0444, desc->dir,
                         &irq_node_proc_fops, (void *)(long)irq);
#endif

        proc_create_data("spurious", 0444, desc->dir,
                         &irq_spurious_proc_fops, (void *)(long)irq);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        char name [MAX_NAMELEN];

        if (!root_irq_dir || !desc->dir)
                return;
#ifdef CONFIG_SMP
        remove_proc_entry("smp_affinity", desc->dir);
        remove_proc_entry("affinity_hint", desc->dir);
        remove_proc_entry("smp_affinity_list", desc->dir);
        remove_proc_entry("node", desc->dir);
#endif
        remove_proc_entry("spurious", desc->dir);

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%u", irq);
        remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
        if (action->dir) {
                struct irq_desc *desc = irq_to_desc(irq);

                remove_proc_entry(action->dir->name, desc->dir);
        }
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
        proc_create("irq/default_smp_affinity", 0600, NULL,
                    &default_affinity_proc_fops);
#endif
}
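
/*
 * Create /proc/irq/, register the default affinity entry and add a
 * directory for every interrupt descriptor that already exists.
 */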
void init_irq_proc(void)
{
        unsigned int irq;
        struct irq_desc *desc;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);
        if (!root_irq_dir)
                return;

        register_default_affinity_proc();

        /*
         * Create entries for all existing IRQs.
         */
        for_each_irq_desc(irq, desc) {
                if (!desc)
                        continue;

                register_irq_proc(irq, desc);
        }
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
        return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
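
/*
 * seq_file show routine used for /proc/interrupts when
 * CONFIG_GENERIC_IRQ_SHOW is enabled: one row per interrupt with the
 * per-CPU counts, the irq chip name, the trigger type (when
 * CONFIG_GENERIC_IRQ_SHOW_LEVEL is set) and the names of all registered
 * actions, followed by the architecture rows from arch_show_interrupts().
 */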
int show_interrupts(struct seq_file *p, void *v)
{
        static int prec;

        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > ACTUAL_NR_IRQS)
                return 0;

        if (i == ACTUAL_NR_IRQS)
                return arch_show_interrupts(p, prec);

        /* print header and calculate the width of the first column */
        if (i == 0) {
                for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                        j *= 10;

                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        desc = irq_to_desc(i);
        if (!desc)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

        if (desc->irq_data.chip) {
                if (desc->irq_data.chip->irq_print_chip)
                        desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
                else if (desc->irq_data.chip->name)
                        seq_printf(p, " %8s", desc->irq_data.chip->name);
                else
                        seq_printf(p, " %8s", "-");
        } else {
                seq_printf(p, " %8s", "None");
        }
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
        seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
        if (desc->name)
                seq_printf(p, "-%-8s", desc->name);

        if (action) {
                seq_printf(p, " %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
#endif