// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

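/*
 * Show the affinity mask selected by @type for the interrupt stored in
 * m->private: the configured (or pending) mask for AFFINITY*, the mask
 * the hardware was actually programmed with for EFFECTIVE*, printed as
 * a hex bitmask or as a CPU list depending on the *_LIST variant.
 */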
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
		if (irq_move_pending(&desc->irq_data))
			mask = irq_desc_get_pending_mask(desc);
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

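/*
 * The hint is copied under desc->lock so that a driver clearing its
 * affinity_hint concurrently cannot leave us printing from a stale
 * pointer; if no hint is set an empty mask is shown.
 */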
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point in moving it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not, then any change to the affinity is pointless because
	 * the startup code invokes irq_setup_affinity(), which will
	 * select an online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif

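/*
 * Common write handler for /proc/irq/<irq>/smp_affinity (type == 0,
 * hex bitmask, e.g. "echo 0c > smp_affinity" for CPUs 2-3) and
 * /proc/irq/<irq>/smp_affinity_list (type == 1, CPU list, e.g.
 * "echo 2-3 > smp_affinity_list").
 */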
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EPERM;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's far too easy a
	 * way to accidentally make the system unusable :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};

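/*
 * Unlike smp_affinity, the effective affinity files are read-only:
 * they report the mask the irq chip actually programmed, which may be
 * a subset of the requested affinity.
 */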
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

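/* Show irq_default_affinity, the mask used for newly set up interrupts. */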
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's far too easy a
	 * way to accidentally make the system unusable :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, pde_data(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

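/*
 * Return 1 if the new action's name does not collide with the name of a
 * handler already installed on this descriptor, 0 otherwise. Checked
 * under desc->lock because actions can be added concurrently.
 */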
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

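/*
 * Create the /proc/irq/<irq>/<name>/ directory for one handler; skipped
 * when the per-IRQ directory does not exist yet, the action already has
 * a directory, or the name would not be unique.
 */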
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

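/* Create /proc/irq/<irq>/ and its control files for one descriptor. */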
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	umode_t umode = S_IRUGO;

	if (irq_can_set_affinity_usr(desc->irq_data.irq))
		umode |= S_IWUSR;

	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", umode, desc->dir,
			 &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
				irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", umode, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
				irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
				irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
				irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
				irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

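/* Tear down everything register_irq_proc() created for this descriptor. */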
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

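/* Remove the per-handler directory created by register_handler_proc(). */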
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}

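/* Create the /proc/irq/ root and entries for all already allocated IRQs. */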
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS irq_get_nr_irqs()
#endif

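/*
 * seq_file show callback for /proc/interrupts: prints the header row on
 * the first call, then one line per active interrupt with per-CPU
 * counts, chip name, hwirq number, trigger type (if configured) and
 * action names, and finally lets arch_show_interrupts() append the
 * architecture specific counters.
 */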
int show_interrupts(struct seq_file *p, void *v)
{
	const unsigned int nr_irqs = irq_get_nr_irqs();
	static int prec;

	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs)
		goto outsparse;

	seq_printf(p, "%*d:", prec, i);
	for_each_online_cpu(j) {
		unsigned int cnt = desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0;

		seq_put_decimal_ull_width(p, " ", cnt, 10);
	}
	seq_putc(p, ' ');

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, "%8s", desc->irq_data.chip->name);
		else
			seq_printf(p, "%8s", "-");
	} else {
		seq_printf(p, "%8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif