1 /*
2 * linux/kernel/irq/manage.c
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
7 * This file contains driver APIs to the irq subsystem.
8 */
10 #define pr_fmt(fmt) "genirq: " fmt
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/task_work.h>
22 #include "internals.h"
24 #ifdef CONFIG_IRQ_FORCED_THREADING
25 __read_mostly bool force_irqthreads;
27 static int __init setup_forced_irqthreads(char *arg)
29 force_irqthreads = true;
30 return 0;
32 early_param("threadirqs", setup_forced_irqthreads);
33 #endif
35 /**
36 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
37 * @irq: interrupt number to wait for
39 * This function waits for any pending IRQ handlers for this interrupt
40 * to complete before returning. If you use this function while
41 * holding a resource the IRQ handler may need you will deadlock.
43 * This function may be called - with care - from IRQ context.
45 void synchronize_irq(unsigned int irq)
47 struct irq_desc *desc = irq_to_desc(irq);
48 bool inprogress;
50 if (!desc)
51 return;
53 do {
54 unsigned long flags;
57 * Wait until we're out of the critical section. This might
58 * give the wrong answer due to the lack of memory barriers.
60 while (irqd_irq_inprogress(&desc->irq_data))
61 cpu_relax();
63 /* Ok, that indicated we're done: double-check carefully. */
64 raw_spin_lock_irqsave(&desc->lock, flags);
65 inprogress = irqd_irq_inprogress(&desc->irq_data);
66 raw_spin_unlock_irqrestore(&desc->lock, flags);
68 /* Oops, that failed? */
69 } while (inprogress);
72 * We made sure that no hardirq handler is running. Now verify
73 * that no threaded handlers are active.
75 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
77 EXPORT_SYMBOL(synchronize_irq);
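/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * driver-side use of synchronize_irq() when tearing down data that the
 * handler touches.  The structure and field names are hypothetical; only
 * disable_irq_nosync(), synchronize_irq() and kfree() are real APIs.
 * Per the warning above, no lock the handler might take may be held here.
 */
struct foo_quiesce_example_dev {
	unsigned int irq;
	void *rx_buf;
};

static void foo_quiesce_example(struct foo_quiesce_example_dev *foo)
{
	/* Stop new invocations, then wait for handlers running elsewhere */
	disable_irq_nosync(foo->irq);
	synchronize_irq(foo->irq);

	/* The handler can no longer be using rx_buf */
	kfree(foo->rx_buf);
	foo->rx_buf = NULL;
}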
79 #ifdef CONFIG_SMP
80 cpumask_var_t irq_default_affinity;
82 /**
83 * irq_can_set_affinity - Check if the affinity of a given irq can be set
84 * @irq: Interrupt to check
87 int irq_can_set_affinity(unsigned int irq)
89 struct irq_desc *desc = irq_to_desc(irq);
91 if (!desc || !irqd_can_balance(&desc->irq_data) ||
92 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
93 return 0;
95 return 1;
98 /**
99 * irq_set_thread_affinity - Notify irq threads to adjust affinity
100 * @desc: irq descriptor which has affinity changed
102 * We just set IRQTF_AFFINITY and delegate the affinity setting
103 * to the interrupt thread itself. We can not call
104 * set_cpus_allowed_ptr() here as we hold desc->lock and this
105 * code can be called from hard interrupt context.
107 void irq_set_thread_affinity(struct irq_desc *desc)
109 struct irqaction *action = desc->action;
111 while (action) {
112 if (action->thread)
113 set_bit(IRQTF_AFFINITY, &action->thread_flags);
114 action = action->next;
118 #ifdef CONFIG_GENERIC_PENDING_IRQ
119 static inline bool irq_can_move_pcntxt(struct irq_data *data)
121 return irqd_can_move_in_process_context(data);
123 static inline bool irq_move_pending(struct irq_data *data)
125 return irqd_is_setaffinity_pending(data);
127 static inline void
128 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
130 cpumask_copy(desc->pending_mask, mask);
132 static inline void
133 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
135 cpumask_copy(mask, desc->pending_mask);
137 #else
138 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
139 static inline bool irq_move_pending(struct irq_data *data) { return false; }
140 static inline void
141 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
142 static inline void
143 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
144 #endif
146 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
147 bool force)
149 struct irq_desc *desc = irq_data_to_desc(data);
150 struct irq_chip *chip = irq_data_get_irq_chip(data);
151 int ret;
153 ret = chip->irq_set_affinity(data, mask, force);
154 switch (ret) {
155 case IRQ_SET_MASK_OK:
156 cpumask_copy(data->affinity, mask);
157 case IRQ_SET_MASK_OK_NOCOPY:
158 irq_set_thread_affinity(desc);
159 ret = 0;
162 return ret;
165 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
166 bool force)
168 struct irq_chip *chip = irq_data_get_irq_chip(data);
169 struct irq_desc *desc = irq_data_to_desc(data);
170 int ret = 0;
172 if (!chip || !chip->irq_set_affinity)
173 return -EINVAL;
175 if (irq_can_move_pcntxt(data)) {
176 ret = irq_do_set_affinity(data, mask, force);
177 } else {
178 irqd_set_move_pending(data);
179 irq_copy_pending(desc, mask);
182 if (desc->affinity_notify) {
183 kref_get(&desc->affinity_notify->kref);
184 schedule_work(&desc->affinity_notify->work);
186 irqd_set(data, IRQD_AFFINITY_SET);
188 return ret;
191 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
193 struct irq_desc *desc = irq_to_desc(irq);
194 unsigned long flags;
195 int ret;
197 if (!desc)
198 return -EINVAL;
200 raw_spin_lock_irqsave(&desc->lock, flags);
201 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
202 raw_spin_unlock_irqrestore(&desc->lock, flags);
203 return ret;
206 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
208 unsigned long flags;
209 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
211 if (!desc)
212 return -EINVAL;
213 desc->affinity_hint = m;
214 irq_put_desc_unlock(desc, flags);
215 return 0;
217 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
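/*
 * Editor's illustrative sketch (not part of the original file): a
 * multi-queue driver publishing affinity hints so that userspace (e.g.
 * irqbalance) can spread its vectors across CPUs.  The irq array and
 * queue count are hypothetical; passing NULL later clears a hint again.
 */
static void foo_set_queue_hints_example(unsigned int *irqs,
					unsigned int nr_queues)
{
	unsigned int i, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < nr_queues; i++) {
		irq_set_affinity_hint(irqs[i], cpumask_of(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}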
219 static void irq_affinity_notify(struct work_struct *work)
221 struct irq_affinity_notify *notify =
222 container_of(work, struct irq_affinity_notify, work);
223 struct irq_desc *desc = irq_to_desc(notify->irq);
224 cpumask_var_t cpumask;
225 unsigned long flags;
227 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
228 goto out;
230 raw_spin_lock_irqsave(&desc->lock, flags);
231 if (irq_move_pending(&desc->irq_data))
232 irq_get_pending(cpumask, desc);
233 else
234 cpumask_copy(cpumask, desc->irq_data.affinity);
235 raw_spin_unlock_irqrestore(&desc->lock, flags);
237 notify->notify(notify, cpumask);
239 free_cpumask_var(cpumask);
240 out:
241 kref_put(&notify->kref, notify->release);
245 * irq_set_affinity_notifier - control notification of IRQ affinity changes
246 * @irq: Interrupt for which to enable/disable notification
247 * @notify: Context for notification, or %NULL to disable
248 * notification. Function pointers must be initialised;
249 * the other fields will be initialised by this function.
251 * Must be called in process context. Notification may only be enabled
252 * after the IRQ is allocated and must be disabled before the IRQ is
253 * freed using free_irq().
256 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
258 struct irq_desc *desc = irq_to_desc(irq);
259 struct irq_affinity_notify *old_notify;
260 unsigned long flags;
262 /* The release function is promised process context */
263 might_sleep();
265 if (!desc)
266 return -EINVAL;
268 /* Complete initialisation of *notify */
269 if (notify) {
270 notify->irq = irq;
271 kref_init(&notify->kref);
272 INIT_WORK(&notify->work, irq_affinity_notify);
275 raw_spin_lock_irqsave(&desc->lock, flags);
276 old_notify = desc->affinity_notify;
277 desc->affinity_notify = notify;
278 raw_spin_unlock_irqrestore(&desc->lock, flags);
280 if (old_notify)
281 kref_put(&old_notify->kref, old_notify->release);
283 return 0;
285 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
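/*
 * Editor's illustrative sketch (not part of the original file): registering
 * an affinity notifier.  As the kernel-doc above says, only the function
 * pointers need to be filled in before calling irq_set_affinity_notifier();
 * the remaining fields are initialised by that function.  The embedding
 * structure and callback bodies here are hypothetical.
 */
struct foo_notify_example {
	struct irq_affinity_notify notify;
	unsigned int irq;
};

static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct foo_notify_example *foo =
		container_of(notify, struct foo_notify_example, notify);

	/* e.g. re-point per-queue data at the CPUs in @mask */
	pr_info("irq %u affinity changed\n", foo->irq);
}

static void foo_notify_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);

	kfree(container_of(notify, struct foo_notify_example, notify));
}

static int foo_register_notifier_example(unsigned int irq)
{
	struct foo_notify_example *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	foo->irq = irq;
	foo->notify.notify = foo_affinity_changed;
	foo->notify.release = foo_notify_release;
	return irq_set_affinity_notifier(irq, &foo->notify);
}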
287 #ifndef CONFIG_AUTO_IRQ_AFFINITY
289 * Generic version of the affinity autoselector.
291 static int
292 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
294 struct cpumask *set = irq_default_affinity;
295 int node = desc->irq_data.node;
297 /* Excludes PER_CPU and NO_BALANCE interrupts */
298 if (!irq_can_set_affinity(irq))
299 return 0;
302 * Preserve a userspace affinity setup, but make sure that
303 * one of the targets is online.
305 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
306 if (cpumask_intersects(desc->irq_data.affinity,
307 cpu_online_mask))
308 set = desc->irq_data.affinity;
309 else
310 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
313 cpumask_and(mask, cpu_online_mask, set);
314 if (node != NUMA_NO_NODE) {
315 const struct cpumask *nodemask = cpumask_of_node(node);
317 /* make sure at least one of the cpus in nodemask is online */
318 if (cpumask_intersects(mask, nodemask))
319 cpumask_and(mask, mask, nodemask);
321 irq_do_set_affinity(&desc->irq_data, mask, false);
322 return 0;
324 #else
325 static inline int
326 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
328 return irq_select_affinity(irq);
330 #endif
333 * Called when affinity is set via /proc/irq
335 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
337 struct irq_desc *desc = irq_to_desc(irq);
338 unsigned long flags;
339 int ret;
341 raw_spin_lock_irqsave(&desc->lock, flags);
342 ret = setup_affinity(irq, desc, mask);
343 raw_spin_unlock_irqrestore(&desc->lock, flags);
344 return ret;
347 #else
348 static inline int
349 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
351 return 0;
353 #endif
355 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
357 if (suspend) {
358 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
359 return;
360 desc->istate |= IRQS_SUSPENDED;
363 if (!desc->depth++)
364 irq_disable(desc);
367 static int __disable_irq_nosync(unsigned int irq)
369 unsigned long flags;
370 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
372 if (!desc)
373 return -EINVAL;
374 __disable_irq(desc, irq, false);
375 irq_put_desc_busunlock(desc, flags);
376 return 0;
380 * disable_irq_nosync - disable an irq without waiting
381 * @irq: Interrupt to disable
383 * Disable the selected interrupt line. Disables and Enables are
384 * nested.
385 * Unlike disable_irq(), this function does not ensure existing
386 * instances of the IRQ handler have completed before returning.
388 * This function may be called from IRQ context.
390 void disable_irq_nosync(unsigned int irq)
392 __disable_irq_nosync(irq);
394 EXPORT_SYMBOL(disable_irq_nosync);
397 * disable_irq - disable an irq and wait for completion
398 * @irq: Interrupt to disable
400 * Disable the selected interrupt line. Enables and Disables are
401 * nested.
402 * This function waits for any pending IRQ handlers for this interrupt
403 * to complete before returning. If you use this function while
404 * holding a resource the IRQ handler may need you will deadlock.
406 * This function may be called - with care - from IRQ context.
408 void disable_irq(unsigned int irq)
410 if (!__disable_irq_nosync(irq))
411 synchronize_irq(irq);
413 EXPORT_SYMBOL(disable_irq);
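/*
 * Editor's illustrative sketch (not part of the original file): because
 * disable_irq()/enable_irq() nest, a helper like this is safe even when an
 * outer caller has already disabled the line; only the matching enable
 * re-enables it.  The device reprogramming itself is elided.
 */
static void foo_reprogram_example(unsigned int irq)
{
	disable_irq(irq);	/* waits for running handlers, bumps depth */

	/*
	 * The handler is guaranteed not to run here, so device state can
	 * be changed without racing against it.
	 */

	enable_irq(irq);	/* drops depth; last enable unmasks the line */
}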
415 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
417 if (resume) {
418 if (!(desc->istate & IRQS_SUSPENDED)) {
419 if (!desc->action)
420 return;
421 if (!(desc->action->flags & IRQF_FORCE_RESUME))
422 return;
423 /* Pretend that it got disabled ! */
424 desc->depth++;
426 desc->istate &= ~IRQS_SUSPENDED;
429 switch (desc->depth) {
430 case 0:
431 err_out:
432 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
433 break;
434 case 1: {
435 if (desc->istate & IRQS_SUSPENDED)
436 goto err_out;
437 /* Prevent probing on this irq: */
438 irq_settings_set_noprobe(desc);
439 irq_enable(desc);
440 check_irq_resend(desc, irq);
441 /* fall-through */
443 default:
444 desc->depth--;
449 * enable_irq - enable handling of an irq
450 * @irq: Interrupt to enable
452 * Undoes the effect of one call to disable_irq(). If this
453 * matches the last disable, processing of interrupts on this
454 * IRQ line is re-enabled.
456 * This function may be called from IRQ context only when
457 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
459 void enable_irq(unsigned int irq)
461 unsigned long flags;
462 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
464 if (!desc)
465 return;
466 if (WARN(!desc->irq_data.chip,
467 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
468 goto out;
470 __enable_irq(desc, irq, false);
471 out:
472 irq_put_desc_busunlock(desc, flags);
474 EXPORT_SYMBOL(enable_irq);
476 static int set_irq_wake_real(unsigned int irq, unsigned int on)
478 struct irq_desc *desc = irq_to_desc(irq);
479 int ret = -ENXIO;
481 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
482 return 0;
484 if (desc->irq_data.chip->irq_set_wake)
485 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
487 return ret;
491 * irq_set_irq_wake - control irq power management wakeup
492 * @irq: interrupt to control
493 * @on: enable/disable power management wakeup
495 * Enable/disable power management wakeup mode, which is
496 * disabled by default. Enables and disables must match,
497 * just as they match for non-wakeup mode support.
499 * Wakeup mode lets this IRQ wake the system from sleep
500 * states like "suspend to RAM".
502 int irq_set_irq_wake(unsigned int irq, unsigned int on)
504 unsigned long flags;
505 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
506 int ret = 0;
508 if (!desc)
509 return -EINVAL;
511 /* wakeup-capable irqs can be shared between drivers that
512 * don't need to have the same sleep mode behaviors.
514 if (on) {
515 if (desc->wake_depth++ == 0) {
516 ret = set_irq_wake_real(irq, on);
517 if (ret)
518 desc->wake_depth = 0;
519 else
520 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
522 } else {
523 if (desc->wake_depth == 0) {
524 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
525 } else if (--desc->wake_depth == 0) {
526 ret = set_irq_wake_real(irq, on);
527 if (ret)
528 desc->wake_depth = 1;
529 else
530 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
533 irq_put_desc_busunlock(desc, flags);
534 return ret;
536 EXPORT_SYMBOL(irq_set_irq_wake);
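/*
 * Editor's illustrative sketch (not part of the original file): a driver's
 * suspend/resume path arming its interrupt as a wakeup source.  As noted in
 * the kernel-doc above, enables and disables must match; the may_wakeup
 * flag is hypothetical.
 */
static int foo_suspend_example(unsigned int irq, bool may_wakeup)
{
	if (may_wakeup)
		return irq_set_irq_wake(irq, 1);	/* arm for wakeup */
	return 0;
}

static int foo_resume_example(unsigned int irq, bool may_wakeup)
{
	if (may_wakeup)
		return irq_set_irq_wake(irq, 0);	/* matching disable */
	return 0;
}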
539 * Internal function that tells the architecture code whether a
540 * particular irq has been exclusively allocated or is available
541 * for driver use.
543 int can_request_irq(unsigned int irq, unsigned long irqflags)
545 unsigned long flags;
546 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
547 int canrequest = 0;
549 if (!desc)
550 return 0;
552 if (irq_settings_can_request(desc)) {
553 if (!desc->action ||
554 irqflags & desc->action->flags & IRQF_SHARED)
555 canrequest = 1;
557 irq_put_desc_unlock(desc, flags);
558 return canrequest;
561 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
562 unsigned long flags)
564 struct irq_chip *chip = desc->irq_data.chip;
565 int ret, unmask = 0;
567 if (!chip || !chip->irq_set_type) {
569 * IRQF_TRIGGER_* but the PIC does not support multiple
570 * flow-types?
572 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
573 chip ? (chip->name ? : "unknown") : "unknown");
574 return 0;
577 flags &= IRQ_TYPE_SENSE_MASK;
579 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
580 if (!irqd_irq_masked(&desc->irq_data))
581 mask_irq(desc);
582 if (!irqd_irq_disabled(&desc->irq_data))
583 unmask = 1;
586 /* caller masked out all except trigger mode flags */
587 ret = chip->irq_set_type(&desc->irq_data, flags);
589 switch (ret) {
590 case IRQ_SET_MASK_OK:
591 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
592 irqd_set(&desc->irq_data, flags);
594 case IRQ_SET_MASK_OK_NOCOPY:
595 flags = irqd_get_trigger_type(&desc->irq_data);
596 irq_settings_set_trigger_mask(desc, flags);
597 irqd_clear(&desc->irq_data, IRQD_LEVEL);
598 irq_settings_clr_level(desc);
599 if (flags & IRQ_TYPE_LEVEL_MASK) {
600 irq_settings_set_level(desc);
601 irqd_set(&desc->irq_data, IRQD_LEVEL);
604 ret = 0;
605 break;
606 default:
607 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
608 flags, irq, chip->irq_set_type);
610 if (unmask)
611 unmask_irq(desc);
612 return ret;
615 #ifdef CONFIG_HARDIRQS_SW_RESEND
616 int irq_set_parent(int irq, int parent_irq)
618 unsigned long flags;
619 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
621 if (!desc)
622 return -EINVAL;
624 desc->parent_irq = parent_irq;
626 irq_put_desc_unlock(desc, flags);
627 return 0;
629 #endif
632 * Default primary interrupt handler for threaded interrupts. Is
633 * assigned as primary handler when request_threaded_irq is called
634 * with handler == NULL. Useful for oneshot interrupts.
636 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
638 return IRQ_WAKE_THREAD;
642 * Primary handler for nested threaded interrupts. Should never be
643 * called.
645 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
647 WARN(1, "Primary handler called for nested irq %d\n", irq);
648 return IRQ_NONE;
651 static int irq_wait_for_interrupt(struct irqaction *action)
653 set_current_state(TASK_INTERRUPTIBLE);
655 while (!kthread_should_stop()) {
657 if (test_and_clear_bit(IRQTF_RUNTHREAD,
658 &action->thread_flags)) {
659 __set_current_state(TASK_RUNNING);
660 return 0;
662 schedule();
663 set_current_state(TASK_INTERRUPTIBLE);
665 __set_current_state(TASK_RUNNING);
666 return -1;
670 * Oneshot interrupts keep the irq line masked until the threaded
671 * handler has finished. Unmask if the interrupt has not been disabled and
672 * is marked MASKED.
674 static void irq_finalize_oneshot(struct irq_desc *desc,
675 struct irqaction *action)
677 if (!(desc->istate & IRQS_ONESHOT))
678 return;
679 again:
680 chip_bus_lock(desc);
681 raw_spin_lock_irq(&desc->lock);
684 * Implausible though it may be, we need to protect ourselves
685 * against the following scenario:
687 * The thread finishes before the hard interrupt handler on the
688 * other CPU does. If we unmask the irq line then the interrupt
689 * can come in again, mask the line, and leave due to
690 * IRQS_INPROGRESS, so the irq line stays masked forever.
692 * This also serializes the state of shared oneshot handlers
693 * versus "desc->threads_oneshot |= action->thread_mask;" in
694 * irq_wake_thread(). See the comment there which explains the
695 * serialization.
697 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
698 raw_spin_unlock_irq(&desc->lock);
699 chip_bus_sync_unlock(desc);
700 cpu_relax();
701 goto again;
705 * Now check again, whether the thread should run. Otherwise
706 * we would clear the threads_oneshot bit of this thread which
707 * was just set.
709 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
710 goto out_unlock;
712 desc->threads_oneshot &= ~action->thread_mask;
714 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
715 irqd_irq_masked(&desc->irq_data))
716 unmask_irq(desc);
718 out_unlock:
719 raw_spin_unlock_irq(&desc->lock);
720 chip_bus_sync_unlock(desc);
723 #ifdef CONFIG_SMP
725 * Check whether we need to change the affinity of the interrupt thread.
727 static void
728 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
730 cpumask_var_t mask;
731 bool valid = true;
733 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
734 return;
737 * In case we are out of memory we set IRQTF_AFFINITY again and
738 * try again next time
740 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
741 set_bit(IRQTF_AFFINITY, &action->thread_flags);
742 return;
745 raw_spin_lock_irq(&desc->lock);
747 * This code is triggered unconditionally. Check the affinity
748 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
750 if (desc->irq_data.affinity)
751 cpumask_copy(mask, desc->irq_data.affinity);
752 else
753 valid = false;
754 raw_spin_unlock_irq(&desc->lock);
756 if (valid)
757 set_cpus_allowed_ptr(current, mask);
758 free_cpumask_var(mask);
760 #else
761 static inline void
762 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
763 #endif
766 * Interrupts which are not explicitly requested as threaded
767 * interrupts rely on the implicit bh/preempt disable of the hard irq
768 * context. So we need to disable bh here to avoid deadlocks and other
769 * side effects.
771 static irqreturn_t
772 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
774 irqreturn_t ret;
776 local_bh_disable();
777 ret = action->thread_fn(action->irq, action->dev_id);
778 irq_finalize_oneshot(desc, action);
779 local_bh_enable();
780 return ret;
784 * Interrupts explicitly requested as threaded interrupts want to be
785 * preemptible - many of them need to sleep and wait for slow buses to
786 * complete.
788 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
789 struct irqaction *action)
791 irqreturn_t ret;
793 ret = action->thread_fn(action->irq, action->dev_id);
794 irq_finalize_oneshot(desc, action);
795 return ret;
798 static void wake_threads_waitq(struct irq_desc *desc)
800 if (atomic_dec_and_test(&desc->threads_active))
801 wake_up(&desc->wait_for_threads);
804 static void irq_thread_dtor(struct callback_head *unused)
806 struct task_struct *tsk = current;
807 struct irq_desc *desc;
808 struct irqaction *action;
810 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
811 return;
813 action = kthread_data(tsk);
815 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
816 tsk->comm, tsk->pid, action->irq);
819 desc = irq_to_desc(action->irq);
821 * If IRQTF_RUNTHREAD is set, we need to decrement
822 * desc->threads_active and wake possible waiters.
824 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
825 wake_threads_waitq(desc);
827 /* Prevent a stale desc->threads_oneshot */
828 irq_finalize_oneshot(desc, action);
832 * Interrupt handler thread
834 static int irq_thread(void *data)
836 struct callback_head on_exit_work;
837 struct irqaction *action = data;
838 struct irq_desc *desc = irq_to_desc(action->irq);
839 irqreturn_t (*handler_fn)(struct irq_desc *desc,
840 struct irqaction *action);
842 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
843 &action->thread_flags))
844 handler_fn = irq_forced_thread_fn;
845 else
846 handler_fn = irq_thread_fn;
848 init_task_work(&on_exit_work, irq_thread_dtor);
849 task_work_add(current, &on_exit_work, false);
851 irq_thread_check_affinity(desc, action);
853 while (!irq_wait_for_interrupt(action)) {
854 irqreturn_t action_ret;
856 irq_thread_check_affinity(desc, action);
858 action_ret = handler_fn(desc, action);
859 if (action_ret == IRQ_HANDLED)
860 atomic_inc(&desc->threads_handled);
862 wake_threads_waitq(desc);
866 * This is the regular exit path. __free_irq() is stopping the
867 * thread via kthread_stop() after calling
868 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
869 * oneshot mask bit can be set. We cannot verify that, as we
870 * cannot touch the oneshot mask at this point anymore:
871 * __setup_irq() might already have given out the current
872 * thread's thread_mask again.
874 task_work_cancel(current, irq_thread_dtor);
875 return 0;
878 static void irq_setup_forced_threading(struct irqaction *new)
880 if (!force_irqthreads)
881 return;
882 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
883 return;
885 new->flags |= IRQF_ONESHOT;
887 if (!new->thread_fn) {
888 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
889 new->thread_fn = new->handler;
890 new->handler = irq_default_primary_handler;
895 * Internal function to register an irqaction - typically used to
896 * allocate special interrupts that are part of the architecture.
898 static int
899 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
901 struct irqaction *old, **old_ptr;
902 unsigned long flags, thread_mask = 0;
903 int ret, nested, shared = 0;
904 cpumask_var_t mask;
906 if (!desc)
907 return -EINVAL;
909 if (desc->irq_data.chip == &no_irq_chip)
910 return -ENOSYS;
911 if (!try_module_get(desc->owner))
912 return -ENODEV;
915 * Check whether the interrupt nests into another interrupt
916 * thread.
918 nested = irq_settings_is_nested_thread(desc);
919 if (nested) {
920 if (!new->thread_fn) {
921 ret = -EINVAL;
922 goto out_mput;
925 * Replace the primary handler which was provided from
926 * the driver for non nested interrupt handling by the
927 * dummy function which warns when called.
929 new->handler = irq_nested_primary_handler;
930 } else {
931 if (irq_settings_can_thread(desc))
932 irq_setup_forced_threading(new);
936 * Create a handler thread when a thread function is supplied
937 * and the interrupt does not nest into another interrupt
938 * thread.
940 if (new->thread_fn && !nested) {
941 struct task_struct *t;
942 static const struct sched_param param = {
943 .sched_priority = MAX_USER_RT_PRIO/2,
946 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
947 new->name);
948 if (IS_ERR(t)) {
949 ret = PTR_ERR(t);
950 goto out_mput;
953 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
956 * We keep the reference to the task struct even if
957 * the thread dies to avoid that the interrupt code
958 * references an already freed task_struct.
960 get_task_struct(t);
961 new->thread = t;
963 * Tell the thread to set its affinity. This is
964 * important for shared interrupt handlers as we do
965 * not invoke setup_affinity() for the secondary
966 * handlers as everything is already set up. Even for
967 * interrupts marked with IRQF_NO_BALANCE this is
968 * correct as we want the thread to move to the cpu(s)
969 * on which the requesting code placed the interrupt.
971 set_bit(IRQTF_AFFINITY, &new->thread_flags);
974 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
975 ret = -ENOMEM;
976 goto out_thread;
980 * Drivers are often written to work w/o knowledge about the
981 * underlying irq chip implementation, so a request for a
982 * threaded irq without a primary hard irq context handler
983 * requires the ONESHOT flag to be set. Some irq chips like
984 * MSI based interrupts are per se one shot safe. Check the
985 * chip flags, so we can avoid the unmask dance at the end of
986 * the threaded handler for those.
988 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
989 new->flags &= ~IRQF_ONESHOT;
992 * The following block of code has to be executed atomically
994 raw_spin_lock_irqsave(&desc->lock, flags);
995 old_ptr = &desc->action;
996 old = *old_ptr;
997 if (old) {
999 * Can't share interrupts unless both agree to and are
1000 * the same type (level, edge, polarity). So both flag
1001 * fields must have IRQF_SHARED set and the bits which
1002 * set the trigger type must match. Also all must
1003 * agree on ONESHOT.
1005 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1006 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1007 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1008 goto mismatch;
1010 /* All handlers must agree on per-cpuness */
1011 if ((old->flags & IRQF_PERCPU) !=
1012 (new->flags & IRQF_PERCPU))
1013 goto mismatch;
1015 /* add new interrupt at end of irq queue */
1016 do {
1018 * Or all existing action->thread_mask bits,
1019 * so we can find the next zero bit for this
1020 * new action.
1022 thread_mask |= old->thread_mask;
1023 old_ptr = &old->next;
1024 old = *old_ptr;
1025 } while (old);
1026 shared = 1;
1030 * Setup the thread mask for this irqaction for ONESHOT. For
1031 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1032 * conditional in irq_wake_thread().
1034 if (new->flags & IRQF_ONESHOT) {
1036 * Unlikely to have 32 (or 64) irqs sharing one line,
1037 * but who knows.
1039 if (thread_mask == ~0UL) {
1040 ret = -EBUSY;
1041 goto out_mask;
1044 * The thread_mask for the action is or'ed to
1045 * desc->thread_active to indicate that the
1046 * IRQF_ONESHOT thread handler has been woken, but not
1047 * yet finished. The bit is cleared when a thread
1048 * completes. When all threads of a shared interrupt
1049 * line have completed desc->threads_active becomes
1050 * zero and the interrupt line is unmasked. See
1051 * handle.c:irq_wake_thread() for further information.
1053 * If no thread is woken by primary (hard irq context)
1054 * interrupt handlers, then desc->threads_active is
1055 * also checked for zero to unmask the irq line in the
1056 * affected hard irq flow handlers
1057 * (handle_[fasteoi|level]_irq).
1059 * The new action gets the first zero bit of
1060 * thread_mask assigned. See the loop above which or's
1061 * all existing action->thread_mask bits.
1063 new->thread_mask = 1 << ffz(thread_mask);
1065 } else if (new->handler == irq_default_primary_handler &&
1066 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1068 * The interrupt was requested with handler = NULL, so
1069 * we use the default primary handler for it. But it
1070 * does not have the oneshot flag set. In combination
1071 * with level interrupts this is deadly, because the
1072 * default primary handler just wakes the thread, then
1073 * the irq line is reenabled, but the device still
1074 * has the level irq asserted. Rinse and repeat....
1076 * While this works for edge type interrupts, we play
1077 * it safe and reject unconditionally because we can't
1078 * say for sure which type this interrupt really
1079 * has. The type flags are unreliable as the
1080 * underlying chip implementation can override them.
1082 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1083 irq);
1084 ret = -EINVAL;
1085 goto out_mask;
1088 if (!shared) {
1089 init_waitqueue_head(&desc->wait_for_threads);
1091 /* Setup the type (level, edge polarity) if configured: */
1092 if (new->flags & IRQF_TRIGGER_MASK) {
1093 ret = __irq_set_trigger(desc, irq,
1094 new->flags & IRQF_TRIGGER_MASK);
1096 if (ret)
1097 goto out_mask;
1100 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1101 IRQS_ONESHOT | IRQS_WAITING);
1102 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1104 if (new->flags & IRQF_PERCPU) {
1105 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1106 irq_settings_set_per_cpu(desc);
1109 if (new->flags & IRQF_ONESHOT)
1110 desc->istate |= IRQS_ONESHOT;
1112 if (irq_settings_can_autoenable(desc))
1113 irq_startup(desc, true);
1114 else
1115 /* Undo nested disables: */
1116 desc->depth = 1;
1118 /* Exclude IRQ from balancing if requested */
1119 if (new->flags & IRQF_NOBALANCING) {
1120 irq_settings_set_no_balancing(desc);
1121 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1124 /* Set default affinity mask once everything is setup */
1125 setup_affinity(irq, desc, mask);
1127 } else if (new->flags & IRQF_TRIGGER_MASK) {
1128 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1129 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1131 if (nmsk != omsk)
1132 /* hope the handler works with current trigger mode */
1133 pr_warning("irq %d uses trigger mode %u; requested %u\n",
1134 irq, nmsk, omsk);
1137 new->irq = irq;
1138 *old_ptr = new;
1140 /* Reset broken irq detection when installing new handler */
1141 desc->irq_count = 0;
1142 desc->irqs_unhandled = 0;
1145 * Check whether we disabled the irq via the spurious handler
1146 * before. Reenable it and give it another chance.
1148 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1149 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1150 __enable_irq(desc, irq, false);
1153 raw_spin_unlock_irqrestore(&desc->lock, flags);
1156 * Strictly no need to wake it up, but hung_task complains
1157 * when no hard interrupt wakes the thread up.
1159 if (new->thread)
1160 wake_up_process(new->thread);
1162 register_irq_proc(irq, desc);
1163 new->dir = NULL;
1164 register_handler_proc(irq, new);
1165 free_cpumask_var(mask);
1167 return 0;
1169 mismatch:
1170 if (!(new->flags & IRQF_PROBE_SHARED)) {
1171 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1172 irq, new->flags, new->name, old->flags, old->name);
1173 #ifdef CONFIG_DEBUG_SHIRQ
1174 dump_stack();
1175 #endif
1177 ret = -EBUSY;
1179 out_mask:
1180 raw_spin_unlock_irqrestore(&desc->lock, flags);
1181 free_cpumask_var(mask);
1183 out_thread:
1184 if (new->thread) {
1185 struct task_struct *t = new->thread;
1187 new->thread = NULL;
1188 kthread_stop(t);
1189 put_task_struct(t);
1191 out_mput:
1192 module_put(desc->owner);
1193 return ret;
1197 * setup_irq - setup an interrupt
1198 * @irq: Interrupt line to setup
1199 * @act: irqaction for the interrupt
1201 * Used to statically setup interrupts in the early boot process.
1203 int setup_irq(unsigned int irq, struct irqaction *act)
1205 int retval;
1206 struct irq_desc *desc = irq_to_desc(irq);
1208 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1209 return -EINVAL;
1210 chip_bus_lock(desc);
1211 retval = __setup_irq(irq, desc, act);
1212 chip_bus_sync_unlock(desc);
1214 return retval;
1216 EXPORT_SYMBOL_GPL(setup_irq);
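/*
 * Editor's illustrative sketch (not part of the original file): architecture
 * code typically calls setup_irq() with a statically allocated irqaction
 * during early boot, before request_irq() and the allocators are usable.
 * The handler body, name and irq number 0 are hypothetical.
 */
static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	/* ack the timer hardware and advance timekeeping (elided) */
	return IRQ_HANDLED;
}

static struct irqaction foo_timer_irqaction = {
	.handler	= foo_timer_interrupt,
	.flags		= IRQF_TIMER,
	.name		= "foo-timer",
};

static void __init foo_time_init_example(void)
{
	setup_irq(0, &foo_timer_irqaction);
}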
1219 * Internal function to unregister an irqaction - used to free
1220 * regular and special interrupts that are part of the architecture.
1222 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1224 struct irq_desc *desc = irq_to_desc(irq);
1225 struct irqaction *action, **action_ptr;
1226 unsigned long flags;
1228 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1230 if (!desc)
1231 return NULL;
1233 raw_spin_lock_irqsave(&desc->lock, flags);
1236 * There can be multiple actions per IRQ descriptor, find the right
1237 * one based on the dev_id:
1239 action_ptr = &desc->action;
1240 for (;;) {
1241 action = *action_ptr;
1243 if (!action) {
1244 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1245 raw_spin_unlock_irqrestore(&desc->lock, flags);
1247 return NULL;
1250 if (action->dev_id == dev_id)
1251 break;
1252 action_ptr = &action->next;
1255 /* Found it - now remove it from the list of entries: */
1256 *action_ptr = action->next;
1258 /* If this was the last handler, shut down the IRQ line: */
1259 if (!desc->action)
1260 irq_shutdown(desc);
1262 #ifdef CONFIG_SMP
1263 /* make sure affinity_hint is cleaned up */
1264 if (WARN_ON_ONCE(desc->affinity_hint))
1265 desc->affinity_hint = NULL;
1266 #endif
1268 raw_spin_unlock_irqrestore(&desc->lock, flags);
1270 unregister_handler_proc(irq, action);
1272 /* Make sure it's not being used on another CPU: */
1273 synchronize_irq(irq);
1275 #ifdef CONFIG_DEBUG_SHIRQ
1277 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1278 * event to happen even now that it's being freed, so let's make sure that
1279 * is so by doing an extra call to the handler ....
1281 * ( We do this after actually deregistering it, to make sure that a
1282 * 'real' IRQ doesn't run in parallel with our fake. )
1284 if (action->flags & IRQF_SHARED) {
1285 local_irq_save(flags);
1286 action->handler(irq, dev_id);
1287 local_irq_restore(flags);
1289 #endif
1291 if (action->thread) {
1292 kthread_stop(action->thread);
1293 put_task_struct(action->thread);
1296 module_put(desc->owner);
1297 return action;
1301 * remove_irq - free an interrupt
1302 * @irq: Interrupt line to free
1303 * @act: irqaction for the interrupt
1305 * Used to remove interrupts statically setup by the early boot process.
1307 void remove_irq(unsigned int irq, struct irqaction *act)
1309 struct irq_desc *desc = irq_to_desc(irq);
1311 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1312 __free_irq(irq, act->dev_id);
1314 EXPORT_SYMBOL_GPL(remove_irq);
1317 * free_irq - free an interrupt allocated with request_irq
1318 * @irq: Interrupt line to free
1319 * @dev_id: Device identity to free
1321 * Remove an interrupt handler. The handler is removed and if the
1322 * interrupt line is no longer in use by any driver it is disabled.
1323 * On a shared IRQ the caller must ensure the interrupt is disabled
1324 * on the card it drives before calling this function. The function
1325 * does not return until any executing interrupts for this IRQ
1326 * have completed.
1328 * This function must not be called from interrupt context.
1330 void free_irq(unsigned int irq, void *dev_id)
1332 struct irq_desc *desc = irq_to_desc(irq);
1334 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1335 return;
1337 #ifdef CONFIG_SMP
1338 if (WARN_ON(desc->affinity_notify))
1339 desc->affinity_notify = NULL;
1340 #endif
1342 chip_bus_lock(desc);
1343 kfree(__free_irq(irq, dev_id));
1344 chip_bus_sync_unlock(desc);
1346 EXPORT_SYMBOL(free_irq);
1349 * request_threaded_irq - allocate an interrupt line
1350 * @irq: Interrupt line to allocate
1351 * @handler: Function to be called when the IRQ occurs.
1352 * Primary handler for threaded interrupts
1353 * If NULL and thread_fn != NULL the default
1354 * primary handler is installed
1355 * @thread_fn: Function called from the irq handler thread
1356 * If NULL, no irq thread is created
1357 * @irqflags: Interrupt type flags
1358 * @devname: An ascii name for the claiming device
1359 * @dev_id: A cookie passed back to the handler function
1361 * This call allocates interrupt resources and enables the
1362 * interrupt line and IRQ handling. From the point this
1363 * call is made your handler function may be invoked. Since
1364 * your handler function must clear any interrupt the board
1365 * raises, you must take care both to initialise your hardware
1366 * and to set up the interrupt handler in the right order.
1368 * If you want to set up a threaded irq handler for your device
1369 * then you need to supply @handler and @thread_fn. @handler is
1370 * still called in hard interrupt context and has to check
1371 * whether the interrupt originates from the device. If yes it
1372 * needs to disable the interrupt on the device and return
1373 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1374 * @thread_fn. This split handler design is necessary to support
1375 * shared interrupts.
1377 * Dev_id must be globally unique. Normally the address of the
1378 * device data structure is used as the cookie. Since the handler
1379 * receives this value it makes sense to use it.
1381 * If your interrupt is shared you must pass a non NULL dev_id
1382 * as this is required when freeing the interrupt.
1384 * Flags:
1386 * IRQF_SHARED Interrupt is shared
1387 * IRQF_TRIGGER_* Specify active edge(s) or level
1390 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1391 irq_handler_t thread_fn, unsigned long irqflags,
1392 const char *devname, void *dev_id)
1394 struct irqaction *action;
1395 struct irq_desc *desc;
1396 int retval;
1399 * Sanity-check: shared interrupts must pass in a real dev-ID,
1400 * otherwise we'll have trouble later trying to figure out
1401 * which interrupt is which (messes up the interrupt freeing
1402 * logic etc).
1404 if ((irqflags & IRQF_SHARED) && !dev_id)
1405 return -EINVAL;
1407 desc = irq_to_desc(irq);
1408 if (!desc)
1409 return -EINVAL;
1411 if (!irq_settings_can_request(desc) ||
1412 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1413 return -EINVAL;
1415 if (!handler) {
1416 if (!thread_fn)
1417 return -EINVAL;
1418 handler = irq_default_primary_handler;
1421 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1422 if (!action)
1423 return -ENOMEM;
1425 action->handler = handler;
1426 action->thread_fn = thread_fn;
1427 action->flags = irqflags;
1428 action->name = devname;
1429 action->dev_id = dev_id;
1431 chip_bus_lock(desc);
1432 retval = __setup_irq(irq, desc, action);
1433 chip_bus_sync_unlock(desc);
1435 if (retval)
1436 kfree(action);
1438 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1439 if (!retval && (irqflags & IRQF_SHARED)) {
1441 * It's a shared IRQ -- the driver ought to be prepared for it
1442 * to happen immediately, so let's make sure....
1443 * We disable the irq to make sure that a 'real' IRQ doesn't
1444 * run in parallel with our fake.
1446 unsigned long flags;
1448 disable_irq(irq);
1449 local_irq_save(flags);
1451 handler(irq, dev_id);
1453 local_irq_restore(flags);
1454 enable_irq(irq);
1456 #endif
1457 return retval;
1459 EXPORT_SYMBOL(request_threaded_irq);
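/*
 * Editor's illustrative sketch (not part of the original file): the split
 * handler design described in the kernel-doc above.  The primary handler
 * runs in hard interrupt context, checks whether its device raised the
 * interrupt and wakes the thread; the threaded handler then runs in process
 * context and may sleep.  All "foo_*" names are hypothetical.
 */
struct foo_threaded_example_dev {
	unsigned int irq;
	bool event_pending;	/* set by (elided) hardware handling code */
};

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_threaded_example_dev *foo = dev_id;

	/* Check and quiet the device; actual register access elided */
	if (!foo->event_pending)
		return IRQ_NONE;		/* shared line, not ours */

	return IRQ_WAKE_THREAD;			/* run foo_thread_fn() */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_threaded_example_dev *foo = dev_id;

	/* Process context: sleeping, mutexes and slow bus I/O are fine */
	foo->event_pending = false;
	return IRQ_HANDLED;
}

static int foo_request_example(struct foo_threaded_example_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				    IRQF_SHARED, "foo", foo);
}

static void foo_release_example(struct foo_threaded_example_dev *foo)
{
	free_irq(foo->irq, foo);	/* dev_id must match the request */
}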
1462 * request_any_context_irq - allocate an interrupt line
1463 * @irq: Interrupt line to allocate
1464 * @handler: Function to be called when the IRQ occurs.
1465 * Threaded handler for threaded interrupts.
1466 * @flags: Interrupt type flags
1467 * @name: An ascii name for the claiming device
1468 * @dev_id: A cookie passed back to the handler function
1470 * This call allocates interrupt resources and enables the
1471 * interrupt line and IRQ handling. It selects either a
1472 * hardirq or threaded handling method depending on the
1473 * context.
1475 * On failure, it returns a negative value. On success,
1476 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1478 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1479 unsigned long flags, const char *name, void *dev_id)
1481 struct irq_desc *desc = irq_to_desc(irq);
1482 int ret;
1484 if (!desc)
1485 return -EINVAL;
1487 if (irq_settings_is_nested_thread(desc)) {
1488 ret = request_threaded_irq(irq, NULL, handler,
1489 flags, name, dev_id);
1490 return !ret ? IRQC_IS_NESTED : ret;
1493 ret = request_irq(irq, handler, flags, name, dev_id);
1494 return !ret ? IRQC_IS_HARDIRQ : ret;
1496 EXPORT_SYMBOL_GPL(request_any_context_irq);
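/*
 * Editor's illustrative sketch (not part of the original file): a caller
 * that does not care whether its line ends up as a real hardirq or as a
 * nested-threaded interrupt (e.g. behind a slow-bus irq chip) only needs
 * to distinguish failure from the two success codes.
 */
static int foo_any_context_example(unsigned int irq, irq_handler_t handler,
				   void *dev_id)
{
	int ret = request_any_context_irq(irq, handler, IRQF_TRIGGER_LOW,
					  "foo", dev_id);

	if (ret < 0)
		return ret;	/* real error */

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success */
	return 0;
}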
1498 void enable_percpu_irq(unsigned int irq, unsigned int type)
1500 unsigned int cpu = smp_processor_id();
1501 unsigned long flags;
1502 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1504 if (!desc)
1505 return;
1507 type &= IRQ_TYPE_SENSE_MASK;
1508 if (type != IRQ_TYPE_NONE) {
1509 int ret;
1511 ret = __irq_set_trigger(desc, irq, type);
1513 if (ret) {
1514 WARN(1, "failed to set type for IRQ%d\n", irq);
1515 goto out;
1519 irq_percpu_enable(desc, cpu);
1520 out:
1521 irq_put_desc_unlock(desc, flags);
1523 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1525 void disable_percpu_irq(unsigned int irq)
1527 unsigned int cpu = smp_processor_id();
1528 unsigned long flags;
1529 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1531 if (!desc)
1532 return;
1534 irq_percpu_disable(desc, cpu);
1535 irq_put_desc_unlock(desc, flags);
1537 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1540 * Internal function to unregister a percpu irqaction.
1542 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1544 struct irq_desc *desc = irq_to_desc(irq);
1545 struct irqaction *action;
1546 unsigned long flags;
1548 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1550 if (!desc)
1551 return NULL;
1553 raw_spin_lock_irqsave(&desc->lock, flags);
1555 action = desc->action;
1556 if (!action || action->percpu_dev_id != dev_id) {
1557 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1558 goto bad;
1561 if (!cpumask_empty(desc->percpu_enabled)) {
1562 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1563 irq, cpumask_first(desc->percpu_enabled));
1564 goto bad;
1567 /* Found it - now remove it from the list of entries: */
1568 desc->action = NULL;
1570 raw_spin_unlock_irqrestore(&desc->lock, flags);
1572 unregister_handler_proc(irq, action);
1574 module_put(desc->owner);
1575 return action;
1577 bad:
1578 raw_spin_unlock_irqrestore(&desc->lock, flags);
1579 return NULL;
1583 * remove_percpu_irq - free a per-cpu interrupt
1584 * @irq: Interrupt line to free
1585 * @act: irqaction for the interrupt
1587 * Used to remove interrupts statically setup by the early boot process.
1589 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1591 struct irq_desc *desc = irq_to_desc(irq);
1593 if (desc && irq_settings_is_per_cpu_devid(desc))
1594 __free_percpu_irq(irq, act->percpu_dev_id);
1598 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
1599 * @irq: Interrupt line to free
1600 * @dev_id: Device identity to free
1602 * Remove a percpu interrupt handler. The handler is removed, but
1603 * the interrupt line is not disabled. This must be done on each
1604 * CPU before calling this function. The function does not return
1605 * until any executing interrupts for this IRQ have completed.
1607 * This function must not be called from interrupt context.
1609 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1611 struct irq_desc *desc = irq_to_desc(irq);
1613 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1614 return;
1616 chip_bus_lock(desc);
1617 kfree(__free_percpu_irq(irq, dev_id));
1618 chip_bus_sync_unlock(desc);
1622 * setup_percpu_irq - setup a per-cpu interrupt
1623 * @irq: Interrupt line to setup
1624 * @act: irqaction for the interrupt
1626 * Used to statically setup per-cpu interrupts in the early boot process.
1628 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1630 struct irq_desc *desc = irq_to_desc(irq);
1631 int retval;
1633 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1634 return -EINVAL;
1635 chip_bus_lock(desc);
1636 retval = __setup_irq(irq, desc, act);
1637 chip_bus_sync_unlock(desc);
1639 return retval;
1643 * request_percpu_irq - allocate a percpu interrupt line
1644 * @irq: Interrupt line to allocate
1645 * @handler: Function to be called when the IRQ occurs.
1646 * @devname: An ascii name for the claiming device
1647 * @dev_id: A percpu cookie passed back to the handler function
1649 * This call allocates interrupt resources, but doesn't
1650 * automatically enable the interrupt. It has to be done on each
1651 * CPU using enable_percpu_irq().
1653 * Dev_id must be globally unique. It is a per-cpu variable, and
1654 * the handler gets called with the interrupted CPU's instance of
1655 * that variable.
1657 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1658 const char *devname, void __percpu *dev_id)
1660 struct irqaction *action;
1661 struct irq_desc *desc;
1662 int retval;
1664 if (!dev_id)
1665 return -EINVAL;
1667 desc = irq_to_desc(irq);
1668 if (!desc || !irq_settings_can_request(desc) ||
1669 !irq_settings_is_per_cpu_devid(desc))
1670 return -EINVAL;
1672 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1673 if (!action)
1674 return -ENOMEM;
1676 action->handler = handler;
1677 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1678 action->name = devname;
1679 action->percpu_dev_id = dev_id;
1681 chip_bus_lock(desc);
1682 retval = __setup_irq(irq, desc, action);
1683 chip_bus_sync_unlock(desc);
1685 if (retval)
1686 kfree(action);
1688 return retval;
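/*
 * Editor's illustrative sketch (not part of the original file): a per-cpu
 * interrupt such as a per-CPU timer is requested once with a percpu cookie
 * and then enabled separately on each CPU, as described in the kernel-doc
 * above.  The counter cookie and irq number are hypothetical.
 */
static int __percpu *foo_percpu_count;

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	int *count = dev_id;	/* this CPU's instance of the cookie */

	(*count)++;
	return IRQ_HANDLED;
}

static int foo_percpu_example_init(unsigned int irq)
{
	int ret;

	foo_percpu_count = alloc_percpu(int);
	if (!foo_percpu_count)
		return -ENOMEM;

	ret = request_percpu_irq(irq, foo_percpu_handler, "foo-percpu",
				 foo_percpu_count);
	if (ret) {
		free_percpu(foo_percpu_count);
		return ret;
	}

	/* On each CPU that should receive it (e.g. from hotplug code): */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}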