/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"
/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags,
						     IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags,
						  IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}
void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}
static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note:	The caller is expected to handle the ack, clear, mask and
 *		unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires reenabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar to the above handle_edge_irq, but using eoi and w/o the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
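
/*
 * Example (illustrative sketch, not part of the original file): the percpu
 * dev id is registered via request_percpu_irq(); each CPU's invocation of
 * the handler then sees its own slot of the percpu variable. All names
 * here are hypothetical; DEFINE_PER_CPU comes from linux/percpu.h.
 */
static DEFINE_PER_CPU(unsigned int, example_tick_count);

static irqreturn_t example_tick_handler(int irq, void *dev_id)
{
	unsigned int *count = dev_id;	/* this CPU's example_tick_count slot */

	(*count)++;
	return IRQ_HANDLED;
}

static int __maybe_unused example_register_tick(unsigned int irq)
{
	return request_percpu_irq(irq, example_tick_handler, "example-tick",
				  &example_tick_count);
}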
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
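
/*
 * Example (illustrative sketch, not part of the original file): callers
 * usually reach irq_modify_status() through the irq_set_status_flags()/
 * irq_clear_status_flags() wrappers in include/linux/irq.h. Marking a
 * line as off-limits for probing and request_irq() could look like this;
 * the function name is hypothetical.
 */
static void __maybe_unused example_reserve_irq(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE | IRQ_NOREQUEST);
}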
/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}