/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;
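/*
 * Editorial note: all descriptors are allocated by the code below, so
 * treating every desc->lock as one lockdep class keeps lockdep's
 * bookkeeping bounded while still covering the irq core's locking rules.
 */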
#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}
static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
}
#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                struct module *owner)
{
        int cpu;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
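/*
 * Editorial note: allocated_irqs tracks which irq numbers are in use
 * independently of how the descriptors themselves are stored (a radix
 * tree with CONFIG_SPARSE_IRQ, a static array without). Updates to the
 * bitmap are serialized by sparse_irq_lock.
 */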
#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);
static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node, owner);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}
static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock protects also show_interrupts() and
         * kstat_irqs_usr(). Once we deleted the descriptor from the
         * sparse tree we can free it. Access in proc will fail to
         * lookup the descriptor.
         */
        mutex_lock(&sparse_irq_lock);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       struct module *owner)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node, owner);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}
int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the number of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};
int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], GFP_KERNEL, node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                desc_set_defaults(i, &desc[i], node, NULL);
        }
        return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        return start;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}
void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}
#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:        The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;
        generic_handle_irq_desc(irq, desc);
        return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
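/*
 * Illustrative sketch, not taken from this file: a typical caller is a
 * demultiplexing flow handler for a cascaded interrupt controller,
 * which maps each pending hardware source to a Linux irq number. The
 * status register and demo_irq_base below are hypothetical:
 *
 *      static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *      {
 *              unsigned long pending = readl(demo_status_reg);
 *              int bit;
 *
 *              for_each_set_bit(bit, &pending, 32)
 *                      generic_handle_irq(demo_irq_base + bit);
 *      }
 */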
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 * @lookup:     Whether to perform the domain lookup or not
 * @regs:       Register file coming from the low-level handling code
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq = hwirq;
        int ret = 0;

        irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
        if (lookup)
                irq = irq_find_mapping(domain, hwirq);
#endif

        /*
         * Some hardware gives randomly wrong interrupts. Rather
         * than crashing, do something sensible.
         */
        if (unlikely(!irq || irq >= nr_irqs)) {
                ack_bad_irq(irq);
                ret = -EINVAL;
        } else {
                generic_handle_irq(irq);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return ret;
}
#endif
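/*
 * Architectures normally reach __handle_domain_irq() through the
 * handle_domain_irq() wrapper from their low-level entry code. A
 * hedged sketch; the hwirq read and the domain pointer below are
 * assumptions for the example:
 *
 *      asmlinkage void demo_handle_arch_irq(struct pt_regs *regs)
 *      {
 *              u32 hwirq = demo_read_pending_hwirq();
 *
 *              handle_domain_irq(demo_root_domain, hwirq, regs);
 *      }
 */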
/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 * @owner:      Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound to the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto err;
        }

        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return alloc_descs(start, cnt, node, owner);

err:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
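/*
 * Most callers use the irq_alloc_descs() / irq_alloc_desc() macros,
 * which pass THIS_MODULE as @owner. A hedged sketch of a driver
 * grabbing four dynamically numbered interrupts:
 *
 *      int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *      if (base < 0)
 *              return base;    // -EINVAL, -EEXIST or -ENOMEM
 *      ...
 *      irq_free_descs(base, 4);
 */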
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:        number of interrupts to allocate
 * @node:       node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

        if (irq < 0)
                return 0;

        for (i = irq; cnt > 0; i++, cnt--) {
                if (arch_setup_hwirq(i, node))
                        goto err;
                irq_clear_status_flags(i, _IRQ_NOREQUEST);
        }
        return irq;

err:
        for (i--; i >= irq; i--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(irq, cnt);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
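/*
 * Note the legacy return convention: unlike __irq_alloc_descs(), which
 * returns a negative error code, this helper returns 0 on failure
 * because its callers historically treat irq 0 as invalid.
 */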
/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:       Free from irq number
 * @cnt:        number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
        int i, j;

        for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif /* CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ */
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
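/*
 * This is the building block behind the for_each_active_irq() iterator
 * in internals.h; an equivalent open-coded walk over all allocated irq
 * numbers would be:
 *
 *      unsigned int irq;
 *
 *      for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *           irq = irq_get_next_irq(irq + 1))
 *              ...
 */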
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}
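/*
 * The pair above is normally used through the irq_get_desc_buslock() /
 * irq_put_desc_busunlock() wrappers in internals.h. A sketch of the
 * usual calling pattern:
 *
 *      unsigned long flags;
 *      struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *      if (!desc)
 *              return -EINVAL;
 *      ...modify descriptor state under desc->lock...
 *      irq_put_desc_busunlock(desc, flags);
 */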
int irq_set_percpu_devid(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        irq_set_percpu_devid_flags(irq);
        return 0;
}
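/*
 * After this call the interrupt must be requested with
 * request_percpu_irq() and enabled on each CPU with enable_percpu_irq();
 * a plain request_irq() on a per-CPU devid interrupt is rejected with
 * -EINVAL.
 */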
void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:        The interrupt number
 * @cpu:        The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        unsigned int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}
/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        int sum;

        irq_lock_sparse();
        sum = kstat_irqs(irq);
        irq_unlock_sparse();
        return sum;
}
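/*
 * A sketch of the intended use, modeled on the /proc readers: a
 * preemptible caller sums the per-CPU counters without holding any
 * reference to the descriptor itself:
 *
 *      unsigned int count = kstat_irqs_usr(irq);
 *
 * The irq_lock_sparse()/irq_unlock_sparse() bracket guarantees that
 * the descriptor is not freed while the counters are being summed.
 */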