/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <trace/irq.h>
#include <linux/bootmem.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:        the interrupt number
 * @desc:       description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}
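
/*
 * By default all CPUs are eligible for irq delivery: on SMP the default
 * affinity mask is allocated from bootmem and fully populated, otherwise
 * the init hook is an empty stub.
 */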
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_bootmem_cpumask_var(&irq_default_affinity);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_SPARSE_IRQ
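
/*
 * Template descriptor: init_one_irq_desc() copies this into every
 * sparsely allocated irq_desc before the real chip gets installed.
 */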
static struct irq_desc irq_desc_init = {
        .irq            = -1,
        .status         = IRQ_DISABLED,
        .chip           = &no_irq_chip,
        .handle_irq     = handle_bad_irq,
        .depth          = 1,
        .lock           = RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
        void *ptr;
        int node;

        node = cpu_to_node(cpu);
        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);

        /*
         * Don't overwrite if we can not get a new one;
         * init_copy_kstat_irqs() could still use the old one.
         */
        if (ptr) {
                printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
                       cpu, node);
                desc->kstat_irqs = ptr;
        }
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        spin_lock_init(&desc->lock);
        init_waitqueue_head(&desc->wait_for_handler);
        desc->irq = irq;
#ifdef CONFIG_SMP
        desc->cpu = cpu;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, cpu, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        if (!init_alloc_desc_masks(desc, cpu, false)) {
                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                BUG_ON(1);
        }
        arch_init_chip_data(desc, cpu);
}
/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);
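
/*
 * Lookup table: one descriptor pointer per irq. It is sized from
 * nr_irqs at boot; the legacy entries are installed by
 * early_irq_init(), the rest are filled in by irq_to_desc_alloc_cpu().
 */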
struct irq_desc **irq_desc_ptrs __read_mostly;
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq            = -1,
                .status         = IRQ_DISABLED,
                .chip           = &no_irq_chip,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
        }
};
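
/*
 * Flat counter array for the legacy irqs: slice i is nr_cpu_ids
 * entries wide and backs desc[i].kstat_irqs (see early_irq_init()).
 */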
static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int i;

        init_irq_default_affinity();

        /* initialize nr_irqs based on nr_cpu_ids */
        arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);

        /* allocate irq_desc_ptrs array based on nr_irqs */
        irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));

        /* allocate based on nr_cpu_ids */
        /* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
        kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
                                          sizeof(int));

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }

        for (i = legacy_count; i < nr_irqs; i++)
                irq_desc_ptrs[i] = NULL;

        return arch_early_irq_init();
}
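
/*
 * Lookup-only variant: returns NULL for out-of-range irqs and, before
 * early_irq_init() has allocated irq_desc_ptrs, for every irq.
 */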
struct irq_desc *irq_to_desc(unsigned int irq)
{
        if (irq_desc_ptrs && irq < nr_irqs)
                return irq_desc_ptrs[irq];

        return NULL;
}
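
/*
 * Find the descriptor for @irq, allocating it on @cpu's node if it
 * does not exist yet.
 */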
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        unsigned long flags;
        int node;

        if (irq >= nr_irqs) {
                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
                        irq, nr_irqs);
                return NULL;
        }

        desc = irq_desc_ptrs[irq];
        if (desc)
                return desc;

        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];
        if (desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
                irq, cpu, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, cpu);

        irq_desc_ptrs[irq] = desc;

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DISABLED,
                .chip           = &no_irq_chip,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};
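
/*
 * Without sparse irqs, the per-irq/per-cpu statistics live in one
 * statically sized table:
 */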
static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq = i;
                init_alloc_desc_masks(&desc[i], 0, true);
                desc[i].kstat_irqs = kstat_irqs_all[i];
        }
        return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
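
/* Reset the per-cpu event counters of @desc. */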
void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themselves.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}
/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}
/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name           = "none",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = ack_bad,
        .end            = noop,
};
/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name           = "dummy",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = noop,
        .mask           = noop,
        .unmask         = noop,
        .end            = noop,
};
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}
DEFINE_TRACE(irq_handler_entry);
DEFINE_TRACE(irq_handler_exit);
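
/*
 * The two tracepoints defined above are fired around every handler
 * invocation in handle_IRQ_event() below.
 */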
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:        the interrupt number
 * @action:     the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

#ifdef __i386__
        if (debug_direct_keyboard && irq == 1)
                lockdep_off();
#endif

        /*
         * Unconditionally enable interrupts for threaded
         * IRQ handlers:
         */
        if (!hardirq_count() || !(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                unsigned int preempt_count = preempt_count();

                trace_irq_handler_entry(irq, action);
                ret = action->handler(irq, action->dev_id);
                trace_irq_handler_exit(irq, action, ret);

                if (preempt_count() != preempt_count) {
                        print_symbol("BUG: unbalanced irq-handler preempt count"
                                     " in %s!\n",
                                     (unsigned long) action->handler);
                        printk("entered with %08x, exited with %08x.\n",
                               preempt_count, preempt_count());
                        dump_stack();
                        preempt_count() = preempt_count;
                }

                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM) {
                local_irq_enable_in_hardirq();
                add_interrupt_randomness(irq);
        }
        local_irq_disable();

#ifdef __i386__
        if (debug_direct_keyboard && irq == 1)
                lockdep_on();
#endif
        return retval;
}
/*
 * Hack - used for development only.
 */
int __read_mostly debug_direct_keyboard = 0;

int __init debug_direct_keyboard_setup(char *str)
{
        debug_direct_keyboard = 1;
        printk(KERN_INFO "Switching IRQ 1 (keyboard) to direct!\n");
#ifdef CONFIG_PREEMPT_RT
        printk(KERN_INFO "WARNING: kernel may easily crash this way!\n");
#endif
        return 1;
}

__setup("debug_direct_keyboard", debug_direct_keyboard_setup);
int redirect_hardirq(struct irq_desc *desc)
{
        /*
         * Direct execution:
         */
        if (!hardirq_preemption || (desc->status & IRQ_NODELAY) ||
                                                        !desc->thread)
                return 0;

#ifdef __i386__
        if (debug_direct_keyboard && desc->irq == 1)
                return 0;
#endif

        BUG_ON(!irqs_disabled());
        if (desc->thread && desc->thread->state != TASK_RUNNING)
                wake_up_process(desc->thread);
        return 1;
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:        the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

#ifdef CONFIG_PREEMPT_RT
        printk(KERN_WARNING "__do_IRQ called for irq %d. "
               "PREEMPT_RT will crash your system soon\n", irq);
        printk(KERN_WARNING "I hope you have a fire-extinguisher handy!\n");
#endif
        kstat_incr_irqs_this_cpu(irq, desc);

        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack) {
                        desc->chip->ack(irq);
                        /* get new one */
                        desc = irq_remap_to_desc(irq, desc);
                }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }

        /*
         * If the task is currently running in user mode, don't
         * detect soft lockups. If CONFIG_DETECT_SOFTLOCKUP is not
         * configured, this should be optimized out.
         */
        if (user_mode(get_irq_regs()))
                touch_softlockup_watchdog();

        spin_lock(&desc->lock);
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                desc = irq_remap_to_desc(irq, desc);
        }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif
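
/*
 * Give every descriptor's lock the common lock class, so lockdep
 * tracks them as a single class (see irq_desc_lock_class above).
 */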
void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}
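
/* Number of irq events seen for @irq on @cpu. */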
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);