/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IRQ subsystem internal functions and variables:
 *
 * Do not ever include this file from anything else than
 * kernel/irq/. Do not even think about using any information outside
 * of this file for your non core code.
 */
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/pm_runtime.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_SPARSE_IRQ
# define MAX_SPARSE_IRQS	INT_MAX
#else
# define MAX_SPARSE_IRQS	NR_IRQS
#endif

#define istate core_internal_state__do_not_mess_with_it

extern bool noirqdebug;

extern struct irqaction chained_action;
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 * IRQTF_FORCED_THREAD  - irq action is force threaded
 * IRQTF_READY     - signals that irq thread is ready
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
	IRQTF_FORCED_THREAD,
	IRQTF_READY,
};
/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT		- autodetection in progress
 * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
 *				  detection
 * IRQS_POLL_INPROGRESS		- polling in progress
 * IRQS_ONESHOT			- irq is not unmasked in primary handler
 * IRQS_REPLAY			- irq has been resent and will not be resent
 *				  again until the handler has run and cleared
 *				  this flag.
 * IRQS_WAITING			- irq is waiting
 * IRQS_PENDING			- irq needs to be resent and should be resent
 *				  at the next available opportunity.
 * IRQS_SUSPENDED		- irq is suspended
 * IRQS_NMI			- irq line is used to deliver NMIs
 * IRQS_SYSFS			- descriptor has been added to sysfs
 */
enum {
	IRQS_AUTODETECT		= 0x00000001,
	IRQS_SPURIOUS_DISABLED	= 0x00000002,
	IRQS_POLL_INPROGRESS	= 0x00000008,
	IRQS_ONESHOT		= 0x00000020,
	IRQS_REPLAY		= 0x00000040,
	IRQS_WAITING		= 0x00000080,
	IRQS_PENDING		= 0x00000200,
	IRQS_SUSPENDED		= 0x00000800,
	IRQS_TIMINGS		= 0x00001000,
	IRQS_NMI		= 0x00002000,
	IRQS_SYSFS		= 0x00004000,
};
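
/*
 * Illustrative sketch, not part of the original header: the IRQS_* bits live
 * in desc->istate (see the #define above) and are normally read and updated
 * with desc->lock held. The hypothetical helper below only demonstrates how
 * such a bit is tested.
 */
static inline bool example_irq_is_pending(struct irq_desc *desc)
{
	/* assumption: the caller holds desc->lock */
	return desc->istate & IRQS_PENDING;
}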
extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);

#define IRQ_RESEND	true
#define IRQ_NORESEND	false

#define IRQ_START_FORCE	true
#define IRQ_START_COND	false

extern int irq_activate(struct irq_desc *desc);
extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);

extern void irq_shutdown(struct irq_desc *desc);
extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);

extern unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask);
#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
#else
extern void irq_mark_irq(unsigned int irq);
#endif
extern int __irq_get_irqchip_state(struct irq_data *data,
				   enum irqchip_irq_state which,
				   bool *state);
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

/* Resending of interrupts: */
int check_irq_resend(struct irq_desc *desc, bool inject);
void clear_irq_resend(struct irq_desc *desc);
void irq_resend_init(struct irq_desc *desc);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);

void wake_threads_waitq(struct irq_desc *desc);
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif
extern bool irq_can_set_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);

extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_lock))
		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
}

static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
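
/*
 * Illustrative sketch, not part of the original header: callers bracket the
 * desc->lock'ed section with the bus lock/unlock callbacks so that chips
 * sitting behind slow (sleeping) busses can defer the real hardware access to
 * chip_bus_sync_unlock(). "example_slow_bus_section" is hypothetical.
 */
static inline void example_slow_bus_section(struct irq_desc *desc)
{
	unsigned long flags;

	chip_bus_lock(desc);			/* may sleep, so taken outside desc->lock */
	raw_spin_lock_irqsave(&desc->lock, flags);
	/* ... update descriptor state / program the irq chip ... */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);		/* may sleep, flushes buffered chip writes */
}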
#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

#define for_each_action_of_desc(desc, act)			\
	for (act = desc->action; act; act = act->next)
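
/*
 * Illustrative sketch, not part of the original header: typical use of
 * for_each_action_of_desc(), walking the (possibly shared) action chain of a
 * descriptor. "example_count_actions" is hypothetical.
 */
static inline int example_count_actions(struct irq_desc *desc)
{
	struct irqaction *action;
	int count = 0;

	/* assumption: the caller holds desc->lock so the chain cannot change */
	for_each_action_of_desc(desc, action)
		count++;

	return count;
}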
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);

static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}

static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}

static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}

static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
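
/*
 * Illustrative sketch, not part of the original header: the usual pattern for
 * the lookup/lock helpers above, as used by irq_set_*() style entry points.
 * "example_poke_irq" is hypothetical; only the pairing of
 * irq_get_desc_buslock() with irq_put_desc_busunlock() matters here.
 */
static inline int example_poke_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	if (!desc)
		return -EINVAL;

	/* desc->lock is held and the chip bus is locked at this point */

	irq_put_desc_busunlock(desc, flags);
	return 0;
}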
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline unsigned int irqd_get(struct irq_data *d)
{
	return __irqd_to_state(d);
}

/*
 * Manipulation functions for irq_data.state
 */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_set_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clr_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) &= ~mask;
}

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
	return __irqd_to_state(d) & mask;
}

static inline void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

#undef __irqd_to_state
static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(desc->kstat_irqs->cnt);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__kstat_incr_irqs_this_cpu(desc);
	desc->tot_count++;
}
static inline int irq_desc_get_node(struct irq_desc *desc)
{
	return irq_common_data_get_node(&desc->irq_common_data);
}

static inline int irq_desc_is_chained(struct irq_desc *desc)
{
	return (desc->action && desc->action == &chained_action);
}

static inline bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}
#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
#ifdef CONFIG_IRQ_TIMINGS

#define IRQ_TIMINGS_SHIFT	5
#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)

/**
 * struct irq_timings - irq timings storing structure
 * @values: a circular buffer of u64 encoded <timestamp,irq> values
 * @count: the number of elements in the array
 */
struct irq_timings {
	u64	values[IRQ_TIMINGS_SIZE];
	int	count;
};

DECLARE_PER_CPU(struct irq_timings, irq_timings);

extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);

static inline void irq_remove_timings(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_TIMINGS;

	irq_timings_free(irq_desc_get_irq(desc));
}

static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
{
	int irq = irq_desc_get_irq(desc);
	int ret;

	/*
	 * We don't need the measurement because the idle code already
	 * knows the next expiry event.
	 */
	if (act->flags & __IRQF_TIMER)
		return;

	/*
	 * In case the timing allocation fails, we just want to warn,
	 * not fail, so let the system boot anyway.
	 */
	ret = irq_timings_alloc(irq);
	if (ret) {
		pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
			irq, ret);
		return;
	}

	desc->istate |= IRQS_TIMINGS;
}
extern void irq_timings_enable(void);
extern void irq_timings_disable(void);

DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);

/*
 * The interrupt number and the timestamp are encoded into a single
 * u64 variable to optimize the size.
 * 48 bit time stamp and 16 bit IRQ number is way sufficient.
 * Who cares about an IRQ after 78 hours of idle time?
 */
static inline u64 irq_timing_encode(u64 timestamp, int irq)
{
	return (timestamp << 16) | irq;
}

static inline int irq_timing_decode(u64 value, u64 *timestamp)
{
	*timestamp = value >> 16;
	return value & U16_MAX;
}
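
/*
 * Illustrative sketch, not part of the original header: encode and decode are
 * inverses as long as the irq number fits in 16 bits; only the low 48 bits of
 * the timestamp survive the round trip. "example_timing_roundtrip" is
 * hypothetical.
 */
static inline bool example_timing_roundtrip(u64 ts, int irq)
{
	u64 decoded_ts;
	int decoded_irq;

	/* assumption: 0 <= irq <= U16_MAX */
	decoded_irq = irq_timing_decode(irq_timing_encode(ts, irq), &decoded_ts);

	return decoded_irq == irq && decoded_ts == (ts & ((1ULL << 48) - 1));
}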
static __always_inline void irq_timings_push(u64 ts, int irq)
{
	struct irq_timings *timings = this_cpu_ptr(&irq_timings);

	timings->values[timings->count & IRQ_TIMINGS_MASK] =
		irq_timing_encode(ts, irq);

	timings->count++;
}

/*
 * The function record_irq_time is only called in one place in the
 * interrupt handler. We want this function always inlined so the code
 * inside is embedded in the caller and the static key branching
 * code can act at the higher level. Without the explicit
 * __always_inline we can end up with a function call and a small
 * overhead in the hotpath for nothing.
 */
static __always_inline void record_irq_time(struct irq_desc *desc)
{
	if (!static_branch_likely(&irq_timing_enabled))
		return;

	if (desc->istate & IRQS_TIMINGS)
		irq_timings_push(local_clock(), irq_desc_get_irq(desc));
}
#else
static inline void irq_remove_timings(struct irq_desc *desc) {}
static inline void irq_setup_timings(struct irq_desc *desc,
				     struct irqaction *act) {}
static inline void record_irq_time(struct irq_desc *desc) {}
#endif /* CONFIG_IRQ_TIMINGS */
#ifdef CONFIG_GENERIC_IRQ_CHIP
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler);
#else
static inline void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return desc->pending_mask;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
	return irqd_is_handle_enforce_irqctx(data);
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return NULL;
}
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
	return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
{
	irqd_set_activated(data);
	return 0;
}
static inline void irq_domain_deactivate_irq(struct irq_data *data)
{
	irqd_clr_activated(data);
}
#endif

static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	return irqd->parent_data;
#else
	return NULL;
#endif
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

struct irq_bit_descr {
	unsigned int	mask;
	char		*name;
};

#define BIT_MASK_DESCR(m)	{ .mask = m, .name = #m }
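
/*
 * Illustrative sketch, not part of the original header: the debugfs code
 * builds tables of these descriptors with BIT_MASK_DESCR() so the name shown
 * in debugfs matches the C identifier of the bit. The table below is a
 * hypothetical example, not one of the tables used by debugfs.c.
 */
static const struct irq_bit_descr example_istate_bits[] __maybe_unused = {
	BIT_MASK_DESCR(IRQS_PENDING),
	BIT_MASK_DESCR(IRQS_SUSPENDED),
	BIT_MASK_DESCR(IRQS_NMI),
};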
void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
			 const struct irq_bit_descr *sd, int size);
void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
	kfree(desc->dev_name);
}
void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */