/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
        force_irqthreads = true;
        return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
/**
 *        synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *        @irq: interrupt number to wait for
 *
 *        This function waits for any pending IRQ handlers for this interrupt
 *        to complete before returning. If you use this function while
 *        holding a resource the IRQ handler may need you will deadlock.
 *
 *        This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        bool inprogress;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section.  This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                inprogress = irqd_irq_inprogress(&desc->irq_data);
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (inprogress);

        /*
         * We made sure that no hardirq handler is running. Now verify
         * that no threaded handlers are active.
         */
        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *        irq_can_set_affinity - Check if the affinity of a given irq can be set
 *        @irq:                Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irqd_can_balance(&desc->irq_data) ||
            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}
/**
 *        irq_set_thread_affinity - Notify irq threads to adjust affinity
 *        @desc:                irq descriptor which has affinity changed
 *
 *        We just set IRQTF_AFFINITY and delegate the affinity setting
 *        to the interrupt thread itself. We can not call
 *        set_cpus_allowed_ptr() here as we hold desc->lock and this
 *        code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
        return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
        return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
        cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
        cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
        int ret = 0;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        if (irq_can_move_pcntxt(data)) {
                ret = chip->irq_set_affinity(data, mask, false);
                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(data->affinity, mask);
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                        ret = 0;
                }
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
        irqd_set(data, IRQD_AFFINITY_SET);

        return ret;
}
/**
 *        irq_set_affinity - Set the irq affinity of a given irq
 *        @irq:                Interrupt to set affinity
 *        @mask:                cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
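/*
 * Usage sketch (editor's illustration, not part of this file): pinning an
 * interrupt to CPU 0.
 *
 *        int err = irq_set_affinity(irq, cpumask_of(0));
 *        if (err)
 *                pr_warning("IRQ %u affinity not set: %d\n", irq, err);
 */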
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
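/*
 * Usage sketch (editor's illustration, not part of this file): a
 * multiqueue driver publishing the preferred CPU of a queue vector so
 * user space balancers can honour it; the vec-> names are hypothetical.
 *
 *        irq_set_affinity_hint(vec->irq, cpumask_of(vec->cpu));
 *        ...
 *        irq_set_affinity_hint(vec->irq, NULL);        (clear before free_irq)
 */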
static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}
/**
 *        irq_set_affinity_notifier - control notification of IRQ affinity changes
 *        @irq:                Interrupt for which to enable/disable notification
 *        @notify:        Context for notification, or %NULL to disable
 *                        notification.  Function pointers must be initialised;
 *                        the other fields will be initialised by this function.
 *
 *        Must be called in process context.  Notification may only be enabled
 *        after the IRQ is allocated and must be disabled before the IRQ is
 *        freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify)
                kref_put(&old_notify->kref, old_notify->release);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
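/*
 * Usage sketch (editor's illustration, not part of this file): receiving
 * affinity-change callbacks; the foo_* names are hypothetical. Only the
 * .notify and .release function pointers are filled in by the caller.
 *
 *        fd->affinity_notify.notify = foo_affinity_changed;
 *        fd->affinity_notify.release = foo_affinity_release;
 *        irq_set_affinity_notifier(fd->irq, &fd->affinity_notify);
 *        ...
 *        irq_set_affinity_notifier(fd->irq, NULL);        (before free_irq)
 */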
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
        int ret;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve an userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }

        cpumask_and(mask, cpu_online_mask, set);
        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(desc->irq_data.affinity, mask);
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
        }
        return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
        return irq_select_affinity(irq);
}
#endif
/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        return 0;
}
#endif
void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
                desc->istate |= IRQS_SUSPENDED;
        }

        if (!desc->depth++)
                irq_disable(desc);
}
static int __disable_irq_nosync(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        __disable_irq(desc, irq, false);
        irq_put_desc_busunlock(desc, flags);
        return 0;
}
/**
 *        disable_irq_nosync - disable an irq without waiting
 *        @irq: Interrupt to disable
 *
 *        Disable the selected interrupt line.  Disables and Enables are
 *        nested.
 *        Unlike disable_irq(), this function does not ensure existing
 *        instances of the IRQ handler have completed before returning.
 *
 *        This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 *        disable_irq - disable an irq and wait for completion
 *        @irq: Interrupt to disable
 *
 *        Disable the selected interrupt line.  Enables and Disables are
 *        nested.
 *        This function waits for any pending IRQ handlers for this interrupt
 *        to complete before returning. If you use this function while
 *        holding a resource the IRQ handler may need you will deadlock.
 *
 *        This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
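/*
 * Usage note (editor's illustration, not part of this file): disables
 * nest, so every disable must be paired with exactly one enable.
 *
 *        disable_irq(irq);        (depth 0 -> 1, line masked)
 *        disable_irq(irq);        (depth 1 -> 2)
 *        enable_irq(irq);         (depth 2 -> 1, still masked)
 *        enable_irq(irq);         (depth 1 -> 0, handling resumes)
 */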
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
        if (resume) {
                if (!(desc->istate & IRQS_SUSPENDED)) {
                        if (!desc->action)
                                return;
                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
                                return;
                        /* Pretend that it got disabled ! */
                        desc->depth++;
                }
                desc->istate &= ~IRQS_SUSPENDED;
        }

        switch (desc->depth) {
        case 0:
 err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}
/**
 *        enable_irq - enable handling of an irq
 *        @irq: Interrupt to enable
 *
 *        Undoes the effect of one call to disable_irq().  If this
 *        matches the last disable, processing of interrupts on this
 *        IRQ line is re-enabled.
 *
 *        This function may be called from IRQ context only when
 *        desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return;
        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;

        __enable_irq(desc, irq, false);
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
                return 0;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}
/**
 *        irq_set_irq_wake - control irq power management wakeup
 *        @irq:        interrupt to control
 *        @on:        enable/disable power management wakeup
 *
 *        Enable/disable power management wakeup mode, which is
 *        disabled by default.  Enables and disables must match,
 *        just as they match for non-wakeup mode support.
 *
 *        Wakeup mode lets this IRQ wake the system from sleep
 *        states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        /* wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
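/*
 * Usage sketch (editor's illustration, not part of this file): arming an
 * interrupt as a system wakeup source from a driver's suspend callback;
 * the foo_* names are hypothetical.
 *
 *        static int foo_suspend(struct device *dev)
 *        {
 *                struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *                if (device_may_wakeup(dev))
 *                        irq_set_irq_wake(fd->irq, 1);
 *                return 0;
 *        }
 */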
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
        int canrequest = 0;

        if (!desc)
                return 0;

        if (irq_settings_can_request(desc)) {
                if (desc->action)
                        if (irqflags & desc->action->flags & IRQF_SHARED)
                                canrequest = 1;
        }
        irq_put_desc_unlock(desc, flags);
        return canrequest;
}
int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        struct irq_chip *chip = desc->irq_data.chip;
        int ret, unmask = 0;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        flags &= IRQ_TYPE_SENSE_MASK;

        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
                if (!irqd_irq_masked(&desc->irq_data))
                        mask_irq(desc);
                if (!irqd_irq_disabled(&desc->irq_data))
                        unmask = 1;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        switch (ret) {
        case IRQ_SET_MASK_OK:
                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
                irqd_set(&desc->irq_data, flags);

        case IRQ_SET_MASK_OK_NOCOPY:
                flags = irqd_get_trigger_type(&desc->irq_data);
                irq_settings_set_trigger_mask(desc, flags);
                irqd_clear(&desc->irq_data, IRQD_LEVEL);
                irq_settings_clr_level(desc);
                if (flags & IRQ_TYPE_LEVEL_MASK) {
                        irq_settings_set_level(desc);
                        irqd_set(&desc->irq_data, IRQD_LEVEL);
                }

                ret = 0;
                break;
        default:
                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
                unmask_irq(desc);
        return ret;
}
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}
static int irq_wait_for_interrupt(struct irqaction *action)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
        }
        return -1;
}
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action, bool force)
{
        if (!(desc->istate & IRQS_ONESHOT))
                return;
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be we need to protect us against
         * the following scenario:
         *
         * The thread is faster done than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again and masks the line, leaves due
         * to IRQS_INPROGRESS and the irq line is masked forever.
         *
         * This also serializes the state of shared oneshot handlers
         * versus "desc->threads_oneshot |= action->thread_mask;" in
         * irq_wake_thread(). See the comment there which explains the
         * serialization.
         */
        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        /*
         * Now check again, whether the thread should run. Otherwise
         * we would clear the threads_oneshot bit of this thread which
         * was just set.
         */
        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;

        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data))
                unmask_irq(desc);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);

        set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        irqreturn_t ret;

        local_bh_disable();
        ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
        return ret;
}
/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
                struct irqaction *action)
{
        irqreturn_t ret;

        ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        return ret;
}
/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        irqreturn_t (*handler_fn)(struct irq_desc *desc,
                        struct irqaction *action);
        int wake;

        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                         &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;

        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;

        while (!irq_wait_for_interrupt(action)) {

                irq_thread_check_affinity(desc, action);

                atomic_inc(&desc->threads_active);

                raw_spin_lock_irq(&desc->lock);
                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
                         * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        irqreturn_t action_ret;

                        raw_spin_unlock_irq(&desc->lock);
                        action_ret = handler_fn(desc, action);
                        if (!noirqdebug)
                                note_interrupt(action->irq, desc, action_ret);
                }

                wake = atomic_dec_and_test(&desc->threads_active);

                if (wake && waitqueue_active(&desc->wait_for_threads))
                        wake_up(&desc->wait_for_threads);
        }

        /* Prevent a stale desc->threads_oneshot */
        irq_finalize_oneshot(desc, action, true);

        /*
         * Clear irqaction. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
         */
        current->irqaction = NULL;
        return 0;
}
/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
        struct task_struct *tsk = current;
        struct irq_desc *desc;

        if (!tsk->irqaction)
                return;

        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

        desc = irq_to_desc(tsk->irqaction->irq);

        /*
         * Prevent a stale desc->threads_oneshot. Must be called
         * before setting the IRQTF_DIED flag.
         */
        irq_finalize_oneshot(desc, tsk->irqaction, true);

        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler.
         */
        set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}
static void irq_setup_forced_threading(struct irqaction *new)
{
        if (!force_irqthreads)
                return;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return;

        new->flags |= IRQF_ONESHOT;

        if (!new->thread_fn) {
                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
                new->thread_fn = new->handler;
                new->handler = irq_default_primary_handler;
        }
}
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags, thread_mask = 0;
        int ret, nested, shared = 0;
        cpumask_var_t mask;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        if (!try_module_get(desc->owner))
                return -ENODEV;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = irq_settings_is_nested_thread(desc);
        if (nested) {
                if (!new->thread_fn) {
                        ret = -EINVAL;
                        goto out_mput;
                }
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        } else {
                if (irq_settings_can_thread(desc))
                        irq_setup_forced_threading(new);
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t)) {
                        ret = PTR_ERR(t);
                        goto out_mput;
                }
                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_thread;
        }

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match. Also all must
                 * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                        old_name = old->name;
                        goto mismatch;
                }

                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;

                /* add new interrupt at end of irq queue */
                do {
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        /*
         * Setup the thread mask for this irqaction. Unlikely to have
         * 32 resp 64 irqs sharing one line, but who knows.
         */
        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
                ret = -EBUSY;
                goto out_mask;
        }
        new->thread_mask = 1 << ffz(thread_mask);

        if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);

                        if (ret)
                                goto out_mask;
                }

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
                                  IRQS_ONESHOT | IRQS_WAITING);
                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

                if (new->flags & IRQF_PERCPU) {
                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
                        irq_settings_set_per_cpu(desc);
                }

                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;

                if (irq_settings_can_autoenable(desc))
                        irq_startup(desc);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING) {
                        irq_settings_set_no_balancing(desc);
                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
                }

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);

        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
                unsigned int omsk = irq_settings_get_trigger_mask(desc);

                if (nmsk != omsk)
                        /* hope the handler works with current trigger mode */
                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
                                   irq, nmsk, omsk);
        }

        new->irq = irq;
        *old_ptr = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);
        free_cpumask_var(mask);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        ret = -EBUSY;

out_mask:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        free_cpumask_var(mask);

out_thread:
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
                        kthread_stop(t);
                put_task_struct(t);
        }
out_mput:
        module_put(desc->owner);
        return ret;
}
/**
 *        setup_irq - setup an interrupt
 *        @irq: Interrupt line to setup
 *        @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);

        if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return -EINVAL;
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the right
         * one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
        if (desc->irq_data.chip->release)
                desc->irq_data.chip->release(irq, dev_id);
#endif

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action)
                irq_shutdown(desc);

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
         * event to happen even now it's being freed, so let's make sure that
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
         *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                if (!test_bit(IRQTF_DIED, &action->thread_flags))
                        kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        module_put(desc->owner);
        return action;
}
/**
 *        remove_irq - free an interrupt
 *        @irq: Interrupt line to free
 *        @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
/**
 *        free_irq - free an interrupt allocated with request_irq
 *        @irq: Interrupt line to free
 *        @dev_id: Device identity to free
 *
 *        Remove an interrupt handler. The handler is removed and if the
 *        interrupt line is no longer in use by any driver it is disabled.
 *        On a shared IRQ the caller must ensure the interrupt is disabled
 *        on the card it drives before calling this function. The function
 *        does not return until any executing interrupts for this IRQ
 *        have completed.
 *
 *        This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return;

#ifdef CONFIG_SMP
        if (WARN_ON(desc->affinity_notify))
                desc->affinity_notify = NULL;
#endif

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
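/*
 * Usage sketch (editor's illustration, not part of this file): freeing a
 * shared interrupt; dev_id must match the cookie passed to request_irq(),
 * and the device should already have its interrupt source disabled.
 *
 *        foo_mask_device_irq(fd);
 *        free_irq(fd->irq, fd);
 */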
/**
 *        request_threaded_irq - allocate an interrupt line
 *        @irq: Interrupt line to allocate
 *        @handler: Function to be called when the IRQ occurs.
 *                  Primary handler for threaded interrupts
 *                  If NULL and thread_fn != NULL the default
 *                  primary handler is installed
 *        @thread_fn: Function called from the irq handler thread
 *                    If NULL, no irq thread is created
 *        @irqflags: Interrupt type flags
 *        @devname: An ascii name for the claiming device
 *        @dev_id: A cookie passed back to the handler function
 *
 *        This call allocates interrupt resources and enables the
 *        interrupt line and IRQ handling. From the point this
 *        call is made your handler function may be invoked. Since
 *        your handler function must clear any interrupt the board
 *        raises, you must take care both to initialise your hardware
 *        and to set up the interrupt handler in the right order.
 *
 *        If you want to set up a threaded irq handler for your device
 *        then you need to supply @handler and @thread_fn. @handler is
 *        still called in hard interrupt context and has to check
 *        whether the interrupt originates from the device. If yes it
 *        needs to disable the interrupt on the device and return
 *        IRQ_WAKE_THREAD which will wake up the handler thread and run
 *        @thread_fn. This split handler design is necessary to support
 *        shared interrupts.
 *
 *        Dev_id must be globally unique. Normally the address of the
 *        device data structure is used as the cookie. Since the handler
 *        receives this value it makes sense to use it.
 *
 *        If your interrupt is shared you must pass a non NULL dev_id
 *        as this is required when freeing the interrupt.
 *
 *        Flags:
 *
 *        IRQF_SHARED                Interrupt is shared
 *        IRQF_SAMPLE_RANDOM        The interrupt can be used for entropy
 *        IRQF_TRIGGER_*                Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (!irq_settings_can_request(desc) ||
            WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
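/*
 * Usage sketch (editor's illustration, not part of this file): the
 * primary handler checks and masks the device interrupt, the threaded
 * handler does the slow work; all foo_* names are hypothetical.
 *
 *        static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *        {
 *                struct foo_dev *fd = dev_id;
 *
 *                if (!foo_irq_is_mine(fd))
 *                        return IRQ_NONE;
 *                foo_mask_device_irq(fd);
 *                return IRQ_WAKE_THREAD;
 *        }
 *
 *        static irqreturn_t foo_thread(int irq, void *dev_id)
 *        {
 *                struct foo_dev *fd = dev_id;
 *
 *                foo_process_events(fd);        (may sleep here)
 *                foo_unmask_device_irq(fd);
 *                return IRQ_HANDLED;
 *        }
 *
 *        err = request_threaded_irq(fd->irq, foo_hardirq, foo_thread,
 *                                   IRQF_SHARED, "foo", fd);
 */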
/**
 *        request_any_context_irq - allocate an interrupt line
 *        @irq: Interrupt line to allocate
 *        @handler: Function to be called when the IRQ occurs.
 *                  Threaded handler for threaded interrupts.
 *        @flags: Interrupt type flags
 *        @name: An ascii name for the claiming device
 *        @dev_id: A cookie passed back to the handler function
 *
 *        This call allocates interrupt resources and enables the
 *        interrupt line and IRQ handling. It selects either a
 *        hardirq or threaded handling method depending on the
 *        context.
 *
 *        On failure, it returns a negative value. On success,
 *        it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
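/*
 * Usage sketch (editor's illustration, not part of this file): useful
 * when the irq may come either from a chip behind a slow bus (nested
 * thread) or from an ordinary hardirq-capable controller.
 *
 *        ret = request_any_context_irq(irq, foo_handler, IRQF_TRIGGER_LOW,
 *                                      "foo", fd);
 *        if (ret < 0)
 *                return ret;
 *        (ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success)
 */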
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

        if (!desc)
                return;

        type &= IRQ_TYPE_SENSE_MASK;
        if (type != IRQ_TYPE_NONE) {
                int ret;

                ret = __irq_set_trigger(desc, irq, type);

                if (ret) {
                        WARN(1, "failed to set type for IRQ%d\n", irq);
                        goto out;
                }
        }

        irq_percpu_enable(desc, cpu);
out:
        irq_put_desc_unlock(desc, flags);
}
void disable_percpu_irq(unsigned int irq)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

        if (!desc)
                return;

        irq_percpu_disable(desc, cpu);
        irq_put_desc_unlock(desc, flags);
}
/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        action = desc->action;
        if (!action || action->percpu_dev_id != dev_id) {
                WARN(1, "Trying to free already-free IRQ %d\n", irq);
                goto bad;
        }

        if (!cpumask_empty(desc->percpu_enabled)) {
                WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
                     irq, cpumask_first(desc->percpu_enabled));
                goto bad;
        }

        /* Found it - now remove it from the list of entries: */
        desc->action = NULL;

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        module_put(desc->owner);
        return action;

bad:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return NULL;
}
/**
 *        remove_percpu_irq - free a per-cpu interrupt
 *        @irq: Interrupt line to free
 *        @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc && irq_settings_is_per_cpu_devid(desc))
                __free_percpu_irq(irq, act->percpu_dev_id);
}
/**
 *        free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *        @irq: Interrupt line to free
 *        @dev_id: Device identity to free
 *
 *        Remove a percpu interrupt handler. The handler is removed, but
 *        the interrupt line is not disabled. This must be done on each
 *        CPU before calling this function. The function does not return
 *        until any executing interrupts for this IRQ have completed.
 *
 *        This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return;

        chip_bus_lock(desc);
        kfree(__free_percpu_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
/**
 *        setup_percpu_irq - setup a per-cpu interrupt
 *        @irq: Interrupt line to setup
 *        @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int retval;

        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return -EINVAL;
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
/**
 *        request_percpu_irq - allocate a percpu interrupt line
 *        @irq: Interrupt line to allocate
 *        @handler: Function to be called when the IRQ occurs.
 *        @devname: An ascii name for the claiming device
 *        @dev_id: A percpu cookie passed back to the handler function
 *
 *        This call allocates interrupt resources, but doesn't
 *        automatically enable the interrupt. It has to be done on each
 *        CPU using enable_percpu_irq().
 *
 *        Dev_id must be globally unique. It is a per-cpu variable, and
 *        the handler gets called with the interrupted CPU's instance of
 *        that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
                       const char *devname, void __percpu *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        if (!dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc || !irq_settings_can_request(desc) ||
            !irq_settings_is_per_cpu_devid(desc))
                return -EINVAL;

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = IRQF_PERCPU;
        action->name = devname;
        action->percpu_dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

        return retval;
}
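/*
 * Usage sketch (editor's illustration, not part of this file): a per-cpu
 * timer interrupt; the handler receives this CPU's instance of the
 * percpu cookie, and each CPU enables the line for itself.
 *
 *        static DEFINE_PER_CPU(struct foo_tick, foo_ticks);
 *
 *        err = request_percpu_irq(irq, foo_tick_handler, "foo_tick",
 *                                 &foo_ticks);
 *
 *        (then, on each CPU, typically from a CPU-up notifier:)
 *        enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */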