/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"
static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
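
/*
 * irqfixup is 0 by default; the "irqfixup" boot option sets it to 1
 * (poll the other handlers when an interrupt goes unhandled) and
 * "irqpoll" sets it to 2 (also poll on handled interrupts marked
 * IRQF_IRQPOLL, and on irq 0). irq_poll_active serializes the pollers
 * against each other, and irq_poll_cpu records which CPU a poll is
 * running on so irq_wait_for_poll() can detect recursion.
 */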
/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled in meantime */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}
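
/*
 * Typical caller: the flow handlers in kernel/irq/chip.c check for a
 * poll in progress via a helper along these lines (a sketch, not a
 * verbatim copy):
 *
 *	static bool irq_check_poll(struct irq_desc *desc)
 *	{
 *		if (!(desc->istate & IRQS_POLL_INPROGRESS))
 *			return false;
 *		return irq_wait_for_poll(desc);
 *	}
 */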
/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/* PER_CPU and nested thread interrupts are never polled */
	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER))
		goto out;

	/* Already running on another processor */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		desc->istate |= IRQS_PENDING;
		goto out;
	}

	/* Mark it poll in progress */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		/* Make sure that there is still a valid action */
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}
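
/*
 * try_one_irq() returns nonzero only if polling the line actually got
 * an IRQ_HANDLED back from one of its handlers, which is what lets
 * misrouted_irq() report success to note_interrupt() below.
 */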
static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}
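
/*
 * Timer callback: periodically re-polls every interrupt line that the
 * spurious detector has disabled (IRQS_SPURIOUS_DISABLED), so devices
 * behind a wedged line keep limping along, and re-arms itself every
 * POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) ticks.
 */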
static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(i, desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
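
/*
 * irqreturn_t values are IRQ_NONE (0), IRQ_HANDLED (1 << 0) and
 * IRQ_WAKE_THREAD (1 << 1), so any legitimate handler return value is
 * <= (IRQ_HANDLED | IRQ_WAKE_THREAD). Everything above that range is
 * a bogus value, which is what bad_action_ret() checks for.
 */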
static inline int bad_action_ret(irqreturn_t action_ret)
{
	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
		return 0;
	return 1;
}
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %pf",
					action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
		action = action->next;
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
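
/*
 * Rate-limited wrapper: only the first 100 bad events per boot are
 * reported, so a permanently stuck line cannot flood the log.
 */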
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
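
/*
 * Decide whether this interrupt warrants a misrouted-IRQ poll:
 *
 *	irqfixup == 0: never
 *	irqfixup >= 1: whenever the interrupt was not handled
 *	irqfixup == 2: additionally for irq 0 and for handled
 *		       interrupts whose first action is IRQF_IRQPOLL
 */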
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt.. Legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}
#define SPURIOUS_DEFERRED	0x80000000
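
/*
 * Bit 31 of threads_handled_last flags that deferred spurious
 * detection is active; the low bits cache the threads_handled count
 * observed at the previous hardware interrupt.
 */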
void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (desc->istate & IRQS_POLL_INPROGRESS)
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(irq, desc, action_ret);
		return;
	}

	/*
	 * We cannot call note_interrupt from the threaded handler
	 * because we need to look at the compound of all handlers
	 * (primary and threaded). Aside of that in the threaded
	 * shared case we have no serialization against an incoming
	 * hardware interrupt while we are dealing with a threaded
	 * result.
	 *
	 * So in case a thread is woken, we just note the fact and
	 * defer the analysis to the next hardware interrupt.
	 *
	 * The threaded handlers store whether they successfully
	 * handled an interrupt and we check whether that number
	 * changed versus the last invocation.
	 *
	 * We could handle all interrupts with the delayed by one
	 * mechanism, but for the non forced threaded case we'd just
	 * add pointless overhead to the straight hardirq interrupts
	 * for the sake of a few lines less code.
	 */
	if (action_ret & IRQ_WAKE_THREAD) {
		/*
		 * There is a thread woken. Check whether one of the
		 * shared primary handlers returned IRQ_HANDLED. If
		 * not we defer the spurious detection to the next
		 * interrupt.
		 */
		if (action_ret == IRQ_WAKE_THREAD) {
			int handled;
			/*
			 * We use bit 31 of thread_handled_last to
			 * denote the deferred spurious detection
			 * active. No locking necessary as
			 * thread_handled_last is only accessed here
			 * and we have the guarantee that hard
			 * interrupts are not reentrant.
			 */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}
			/*
			 * Check whether one of the threaded handlers
			 * returned IRQ_HANDLED since the last
			 * interrupt happened.
			 *
			 * For simplicity we just set bit 31, as it is
			 * set in threads_handled_last as well. So we
			 * avoid extra masking. And we really do not
			 * care about the high bits of the handled
			 * count. We just care about the count being
			 * different than the one we saw before.
			 */
			handled = atomic_read(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				action_ret = IRQ_HANDLED;
				/*
				 * Note: We keep the SPURIOUS_DEFERRED
				 * bit set. We are handling the
				 * previous invocation right now.
				 * Keep it for the current one, so the
				 * next hardware interrupt will
				 * account for it.
				 */
				desc->threads_handled_last = handled;
			} else {
				/*
				 * None of the threaded handlers felt
				 * responsible for the last interrupt
				 *
				 * We keep the SPURIOUS_DEFERRED bit
				 * set in threads_handled_last as we
				 * need to account for the current
				 * interrupt as well.
				 */
				action_ret = IRQ_NONE;
			}
		} else {
			/*
			 * One of the primary handlers returned
			 * IRQ_HANDLED. So we don't care about the
			 * threaded handlers on the same line. Clear
			 * the deferred detection bit.
			 *
			 * In theory we could/should check whether the
			 * deferred bit is set and take the result of
			 * the previous run into account here as
			 * well. But it's really not worth the
			 * trouble. If every other interrupt is
			 * handled we never trigger the spurious
			 * detector. And if this is just the one out
			 * of 100k unhandled ones which is handled
			 * then we merely delay the spurious detection
			 * by one hard interrupt. Not a real problem.
			 */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for otherwise
		 * working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
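
/*
 * "noirqdebug" switches the whole machinery off: callers of
 * note_interrupt() (the hardirq event handling path) are expected to
 * skip the call when noirqdebug is set.
 */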
bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
				"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
				"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);