kernel/irq/proc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"
/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
        AFFINITY,
        AFFINITY_LIST,
        EFFECTIVE,
        EFFECTIVE_LIST,
};
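
/*
 * Emit the requested affinity mask of an interrupt, either as a hex
 * bitmask (AFFINITY/EFFECTIVE) or as a CPU list (*_LIST variants).
 */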
static int show_irq_affinity(int type, struct seq_file *m)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        const struct cpumask *mask;

        switch (type) {
        case AFFINITY:
        case AFFINITY_LIST:
                mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
                if (irqd_is_setaffinity_pending(&desc->irq_data))
                        mask = desc->pending_mask;
#endif
                break;
        case EFFECTIVE:
        case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
                mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
                break;
#endif
        default:
                return -EINVAL;
        }

        switch (type) {
        case AFFINITY_LIST:
        case EFFECTIVE_LIST:
                seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
                break;
        case AFFINITY:
        case EFFECTIVE:
                seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
                break;
        }
        return 0;
}
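
/*
 * Show the affinity hint a driver registered via irq_set_affinity_hint();
 * the mask stays empty when no hint was provided.
 */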
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long)m->private);
        unsigned long flags;
        cpumask_var_t mask;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->affinity_hint)
                cpumask_copy(mask, desc->affinity_hint);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
        free_cpumask_var(mask);

        return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
        /*
         * If the interrupt is started up already then this fails. The
         * interrupt is assigned to an online CPU already. There is no
         * point to move it around randomly. Tell user space that the
         * selected mask is bogus.
         *
         * If not then any change to the affinity is pointless because the
         * startup code invokes irq_setup_affinity() which will select
         * an online CPU anyway.
         */
        return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
        return irq_select_affinity(irq);
}
#endif
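
/*
 * Common write handler for /proc/irq/<irq>/smp_affinity and
 * /proc/irq/<irq>/smp_affinity_list. @type selects the input format:
 * 0 parses a hex bitmask (cpumask_parse_user()), non-zero parses a CPU
 * list (cpumask_parselist_user()), e.g. "echo 2-3 > smp_affinity_list".
 */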
static ssize_t write_irq_affinity(int type, struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
        cpumask_var_t new_value;
        int err;

        if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
                return -EIO;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        if (type)
                err = cpumask_parselist_user(buffer, count, new_value);
        else
                err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto free_cpumask;

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                /*
                 * Special case for empty set - allow the architecture code
                 * to set default SMP affinity.
                 */
                err = irq_select_affinity_usr(irq) ? -EINVAL : count;
        } else {
                err = irq_set_affinity(irq, new_value);
                if (!err)
                        err = count;
        }

free_cpumask:
        free_cpumask_var(new_value);
        return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
{
        return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
        .proc_open      = irq_affinity_proc_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
        .proc_write     = irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
        .proc_open      = irq_affinity_list_proc_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
        .proc_write     = irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
        return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif
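
/*
 * /proc/irq/default_smp_affinity: default affinity mask consulted when
 * new interrupts are set up without an explicit affinity.
 */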
static int default_affinity_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
        return 0;
}

static ssize_t default_affinity_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *ppos)
{
        cpumask_var_t new_value;
        int err;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                goto out;

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                err = -EINVAL;
                goto out;
        }

        cpumask_copy(irq_default_affinity, new_value);
        err = count;

out:
        free_cpumask_var(new_value);
        return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
        return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
        .proc_open      = default_affinity_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
        .proc_write     = default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "%d\n", irq_desc_get_node(desc));
        return 0;
}
#endif
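
/*
 * /proc/irq/<irq>/spurious: statistics kept by the spurious interrupt
 * detection code (total count, unhandled count, time of the last
 * unhandled interrupt).
 */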
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
        struct irq_desc *desc = irq_to_desc((long) m->private);

        seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
                   desc->irq_count, desc->irqs_unhandled,
                   jiffies_to_msecs(desc->last_unhandled));
        return 0;
}

#define MAX_NAMELEN 128
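
/*
 * Return 1 when no other action on this descriptor already uses the same
 * name, so that the per-action directory created below stays unique.
 */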
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;
        int ret = 1;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_action_of_desc(desc, action) {
                if ((action != new_action) && action->name &&
                                !strcmp(new_action->name, action->name)) {
                        ret = 0;
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
        char name [MAX_NAMELEN];
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc->dir || action->dir || !action->name ||
                                        !name_unique(irq, action))
                return;

        snprintf(name, MAX_NAMELEN, "%s", action->name);

        /* create /proc/irq/1234/handler/ */
        action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10
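
/*
 * Create /proc/irq/<irq>/ and its control files for @desc. Called when a
 * handler is set up, so it must cope with concurrent registration attempts.
 */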
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        static DEFINE_MUTEX(register_lock);
        void __maybe_unused *irqp = (void *)(unsigned long) irq;
        char name [MAX_NAMELEN];

        if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
                return;

        /*
         * irq directories are registered only when a handler is
         * added, not when the descriptor is created, so multiple
         * tasks might try to register at the same time.
         */
        mutex_lock(&register_lock);

        if (desc->dir)
                goto out_unlock;

        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        desc->dir = proc_mkdir(name, root_irq_dir);
        if (!desc->dir)
                goto out_unlock;

#ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
        proc_create_data("smp_affinity", 0644, desc->dir,
                         &irq_affinity_proc_ops, irqp);

        /* create /proc/irq/<irq>/affinity_hint */
        proc_create_single_data("affinity_hint", 0444, desc->dir,
                                irq_affinity_hint_proc_show, irqp);

        /* create /proc/irq/<irq>/smp_affinity_list */
        proc_create_data("smp_affinity_list", 0644, desc->dir,
                         &irq_affinity_list_proc_ops, irqp);

        proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
                                irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        proc_create_single_data("effective_affinity", 0444, desc->dir,
                                irq_effective_aff_proc_show, irqp);
        proc_create_single_data("effective_affinity_list", 0444, desc->dir,
                                irq_effective_aff_list_proc_show, irqp);
# endif
#endif
        proc_create_single_data("spurious", 0444, desc->dir,
                                irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
        mutex_unlock(&register_lock);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
        char name [MAX_NAMELEN];

        if (!root_irq_dir || !desc->dir)
                return;
#ifdef CONFIG_SMP
        remove_proc_entry("smp_affinity", desc->dir);
        remove_proc_entry("affinity_hint", desc->dir);
        remove_proc_entry("smp_affinity_list", desc->dir);
        remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        remove_proc_entry("effective_affinity", desc->dir);
        remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
        remove_proc_entry("spurious", desc->dir);

        sprintf(name, "%u", irq);
        remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
        proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
        proc_create("irq/default_smp_affinity", 0644, NULL,
                    &default_affinity_proc_ops);
#endif
}
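
/*
 * Set up /proc/irq and populate it for all interrupt descriptors that
 * already exist at this point.
 */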
void init_irq_proc(void)
{
        unsigned int irq;
        struct irq_desc *desc;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);
        if (!root_irq_dir)
                return;

        register_default_affinity_proc();

        /*
         * Create entries for all existing IRQs.
         */
        for_each_irq_desc(irq, desc)
                register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
        return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
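
/*
 * seq_file ->show() callback behind /proc/interrupts: one line per active
 * interrupt (per-CPU counts, chip name, hwirq number and action names),
 * followed by the architecture specific lines from arch_show_interrupts().
 */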
int show_interrupts(struct seq_file *p, void *v)
{
        static int prec;

        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > ACTUAL_NR_IRQS)
                return 0;

        if (i == ACTUAL_NR_IRQS)
                return arch_show_interrupts(p, prec);

        /* print header and calculate the width of the first column */
        if (i == 0) {
                for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                        j *= 10;

                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        rcu_read_lock();
        desc = irq_to_desc(i);
        if (!desc || irq_settings_is_hidden(desc))
                goto outsparse;

        if (desc->kstat_irqs) {
                for_each_online_cpu(j)
                        any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
        }

        if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
                goto outsparse;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", desc->kstat_irqs ?
                                        *per_cpu_ptr(desc->kstat_irqs, j) : 0);

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->irq_data.chip) {
                if (desc->irq_data.chip->irq_print_chip)
                        desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
                else if (desc->irq_data.chip->name)
                        seq_printf(p, " %8s", desc->irq_data.chip->name);
                else
                        seq_printf(p, " %8s", "-");
        } else {
                seq_printf(p, " %8s", "None");
        }
        if (desc->irq_data.domain)
                seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
        else
                seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
        seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
        if (desc->name)
                seq_printf(p, "-%-8s", desc->name);

        action = desc->action;
        if (action) {
                seq_printf(p, " %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
        raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
        rcu_read_unlock();
        return 0;
}
#endif