/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"
/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
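
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a hypothetical interrupt controller installing its chip on a Linux irq.
 * The example_* names and the 0x10 mask-register offset are assumptions
 * made up for illustration; only the irq_* APIs are real.
 *
 *	static void example_mask(struct irq_data *d)
 *	{
 *		void __iomem *base = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), base + 0x10);
 *	}
 *
 *	static struct irq_chip example_chip = {
 *		.name		= "example",
 *		.irq_mask	= example_mask,
 *	};
 *
 * followed by irq_set_chip(irq, &example_chip) during controller setup.
 */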
/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags,
						     IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
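
/*
 * Example (editor's illustrative sketch): a consumer selecting the
 * trigger type before requesting the line; the irq number is assumed to
 * come from platform resources.
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		return ret;
 */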
/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags,
						  IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
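
/*
 * Example (editor's illustrative sketch): chip_data usually points at the
 * controller's private state, which chip callbacks retrieve with
 * irq_data_get_irq_chip_data(). struct example_priv and the 0x04 offset
 * are assumptions for illustration.
 *
 *	struct example_priv {
 *		void __iomem *base;
 *	};
 *
 *	static void example_unmask(struct irq_data *d)
 *	{
 *		struct example_priv *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), priv->base + 0x04);
 *	}
 *
 * paired with irq_set_chip_data(irq, priv) at setup time.
 */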
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}
void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
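
/*
 * Example (editor's illustrative sketch): a threaded parent handler for
 * an I2C GPIO expander demultiplexing its child interrupts. Sleeping bus
 * accesses are safe here because handle_nested_irq() runs the child
 * thread_fn in this thread's context; the children are assumed to have
 * been marked with irq_set_nested_thread(). example_read_status(),
 * struct example_priv and irq_base are made up for illustration.
 *
 *	static irqreturn_t example_parent_thread(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *		unsigned long status = example_read_status(priv);
 *		int bit;
 *
 *		for_each_set_bit(bit, &status, 8)
 *			handle_nested_irq(priv->irq_base + bit);
 *
 *		return IRQ_HANDLED;
 *	}
 */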
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
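
/*
 * Example (editor's illustrative sketch): handle_simple_irq() is the
 * usual flow handler for demultiplexed child interrupts, where the
 * parent handler does all hardware control. A child line might be wired
 * up as
 *
 *	irq_set_chip_and_handler_name(child_irq, &dummy_irq_chip,
 *				      handle_simple_irq, "demux");
 *
 * with the parent handler calling generic_handle_irq(child_irq) for each
 * pending child. dummy_irq_chip is the no-op chip the kernel provides
 * for exactly this case; child_irq is hypothetical.
 */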
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
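
/*
 * Example (editor's illustrative sketch): wiring a level-triggered line
 * to this flow; example_chip is hypothetical and must provide at least
 * irq_mask (or irq_mask_ack) and irq_unmask so that mask_ack_irq() and
 * cond_unmask_irq() can do their work.
 *
 *	irq_set_chip_and_handler_name(irq, &example_chip,
 *				      handle_level_irq, "level");
 */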
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
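
/*
 * Example (editor's illustrative sketch): the edge flow acks the chip
 * unconditionally, so a chip used with it must implement irq_ack.
 * example_chip is hypothetical.
 *
 *	irq_set_chip_and_handler_name(irq, &example_chip,
 *				      handle_edge_irq, "edge");
 */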
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar to handle_edge_irq() above, but using eoi and without the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
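
/*
 * Example (editor's illustrative sketch): this flow pairs with
 * request_percpu_irq(), which registers the percpu dev_id that is
 * dereferenced above. example_timer_handler and example_percpu_data are
 * made up for illustration.
 *
 *	err = request_percpu_irq(irq, example_timer_handler,
 *				 "example_timer", &example_percpu_data);
 */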
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
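
/*
 * Example (editor's illustrative sketch): chained flow handlers are
 * normally installed through the irq_set_chained_handler() wrapper,
 * which calls __irq_set_handler() with is_chained == 1 so the descriptor
 * is marked noprobe/norequest/nothread and started right away.
 * example_demux_handler is hypothetical.
 *
 *	irq_set_chained_handler(parent_irq, example_demux_handler);
 */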
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
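
/*
 * Example (editor's illustrative sketch): a per-cpu timer interrupt
 * could be excluded from balancing and flagged per-cpu with
 *
 *	irq_modify_status(irq, 0, IRQ_NOBALANCING | IRQ_PER_CPU);
 */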
/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}