/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}
static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
}
static inline int desc_node(struct irq_desc *desc)
{
        return desc->irq_data.node;
}
#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
        int cpu;

        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}
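
/*
 * Illustrative note (not part of the original file): under
 * CONFIG_SPARSE_IRQ the irq number to descriptor mapping lives in the
 * radix tree above, so a lookup is simply
 *
 *      struct irq_desc *desc = irq_to_desc(irq);
 *
 * with a NULL result for numbers that were never allocated, rather
 * than an index into a static NR_IRQS-sized array.
 */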
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
static struct irq_desc *alloc_desc(int irq, int node)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;

        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}
static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        unregister_irq_proc(irq, desc);

        mutex_lock(&sparse_irq_lock);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}
int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;
        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};
int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], GFP_KERNEL, node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                desc_set_defaults(i, &desc[i], node);
        }
        return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
static void free_desc(unsigned int irq)
{
        dynamic_irq_cleanup(irq);
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:        The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;
        generic_handle_irq_desc(irq, desc);
        return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
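
/*
 * Illustrative sketch (not part of the original file): a driver for a
 * demultiplexing interrupt controller might read its pending register
 * and forward each source to the generic layer. demo_demux(),
 * demo_read_pending() and demo_irq_base are hypothetical names.
 *
 *      static irqreturn_t demo_demux(int irq, void *data)
 *      {
 *              unsigned long pending = demo_read_pending(data);
 *              int bit;
 *
 *              for_each_set_bit(bit, &pending, 32)
 *                      generic_handle_irq(demo_irq_base + bit);
 *              return IRQ_HANDLED;
 *      }
 */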
/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto err;
        }

        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return alloc_descs(start, cnt, node);

err:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(irq_alloc_descs);
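
/*
 * Illustrative sketch (not part of the original file): a driver carving
 * out a block of interrupt numbers; the count and node choice here are
 * hypothetical.
 *
 *      int base = irq_alloc_descs(-1, 0, 8, numa_node_id());
 *
 *      if (base < 0)
 *              return base;
 *      ...
 *      irq_free_descs(base, 8);
 *
 * Passing irq >= 0 instead demands that exact number and fails with
 * -EEXIST if it is already taken.
 */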
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:       mark from irq number
 * @cnt:        number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
        unsigned int start;
        int ret = 0;

        if (!cnt || (from + cnt) > nr_irqs)
                return -EINVAL;

        mutex_lock(&sparse_irq_lock);
        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        if (start == from)
                bitmap_set(allocated_irqs, start, cnt);
        else
                ret = -EEXIST;
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
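
/*
 * Illustrative sketch (not part of the original file): platform setup
 * code could pin down a legacy range so later dynamic allocations
 * avoid it, e.g.
 *
 *      if (irq_reserve_irqs(0, 16))
 *              pr_warn("legacy irq range already in use\n");
 *
 * The range is only marked in the bitmap; no descriptors are allocated.
 */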
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
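
/*
 * Illustrative sketch (not part of the original file): walking every
 * allocated irq number with this helper.
 *
 *      unsigned int irq;
 *
 *      for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *           irq = irq_get_next_irq(irq + 1))
 *              pr_info("irq %u is allocated\n", irq);
 */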
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}
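
/*
 * Illustrative sketch (not part of the original file): the pair above
 * is meant to bracket a descriptor update, e.g.
 *
 *      unsigned long flags;
 *      struct irq_desc *desc = __irq_get_desc_lock(irq, &flags, false);
 *
 *      if (!desc)
 *              return -EINVAL;
 *      ... modify the descriptor ...
 *      __irq_put_desc_unlock(desc, flags, false);
 *
 * with bus set to true when the chip requires chip_bus_lock() around
 * the access.
 */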
/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:        irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, desc_node(desc));
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        unsigned int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}
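
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above back /proc/interrupts style accounting, e.g.
 *
 *      unsigned int total = kstat_irqs(irq);
 *      unsigned int here  = kstat_irqs_cpu(irq, smp_processor_id());
 *
 * kstat_irqs() sums the per-cpu counters over all possible cpus, so it
 * is more expensive than the single-cpu variant.
 */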