/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IRQ subsystem internal functions and variables:
 *
 * Do not ever include this file from anything else than
 * kernel/irq/. Do not even think about using any information outside
 * of this file for your non core code.
 */
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/pm_runtime.h>
#include <linux/sched/clock.h>
#ifdef CONFIG_SPARSE_IRQ
# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
#else
# define IRQ_BITMAP_BITS	NR_IRQS
#endif
#define istate core_internal_state__do_not_mess_with_it

extern bool noirqdebug;

extern struct irqaction chained_action;
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 * IRQTF_FORCED_THREAD - irq action is force threaded
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
	IRQTF_FORCED_THREAD,
};
/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT		- autodetection in progress
 * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
 *				  detection
 * IRQS_POLL_INPROGRESS		- polling in progress
 * IRQS_ONESHOT			- irq is not unmasked in primary handler
 * IRQS_REPLAY			- irq is replayed
 * IRQS_WAITING			- irq is waiting
 * IRQS_PENDING			- irq is pending and replayed later
 * IRQS_SUSPENDED		- irq is suspended
 * IRQS_TIMINGS			- irq timings are recorded
 */
enum {
	IRQS_AUTODETECT		= 0x00000001,
	IRQS_SPURIOUS_DISABLED	= 0x00000002,
	IRQS_POLL_INPROGRESS	= 0x00000008,
	IRQS_ONESHOT		= 0x00000020,
	IRQS_REPLAY		= 0x00000040,
	IRQS_WAITING		= 0x00000080,
	IRQS_PENDING		= 0x00000200,
	IRQS_SUSPENDED		= 0x00000800,
	IRQS_TIMINGS		= 0x00001000,
};
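/*
 * Illustrative sketch (not taken from this file): flow handlers update these
 * bits under desc->lock. For example, an interrupt that fires while the line
 * is disabled is only marked pending so it can be resent later, roughly:
 *
 *	raw_spin_lock(&desc->lock);
 *	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 *	if (!desc->action || irqd_irq_disabled(&desc->irq_data))
 *		desc->istate |= IRQS_PENDING;
 *	else
 *		handle_irq_event(desc);
 *	raw_spin_unlock(&desc->lock);
 */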
extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);
#define IRQ_RESEND	true
#define IRQ_NORESEND	false

#define IRQ_START_FORCE	true
#define IRQ_START_COND	false
extern int irq_activate(struct irq_desc *desc);
extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);

extern void irq_shutdown(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);
#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
#else
extern void irq_mark_irq(unsigned int irq);
#endif
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq,
				       struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif
extern bool irq_can_set_affinity_usr(unsigned int irq);

extern int irq_select_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);

extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_lock))
		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
}

static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
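/*
 * Illustrative sketch (assumption, not a definition from this file): callers
 * that reprogram the chip from task context are expected to bracket the
 * update with the bus hooks so a sleeping bus (e.g. I2C/SPI) can be synced
 * after the fast, irq-safe part:
 *
 *	chip_bus_lock(desc);
 *	raw_spin_lock_irqsave(&desc->lock, flags);
 *	... update desc state / program the irq chip ...
 *	raw_spin_unlock_irqrestore(&desc->lock, flags);
 *	chip_bus_sync_unlock(desc);
 */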
#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

#define for_each_action_of_desc(desc, act)			\
	for (act = desc->action; act; act = act->next)
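/*
 * Illustrative usage (assumption, not taken from this file): iterate the
 * shared actions of a descriptor while holding desc->lock:
 *
 *	struct irqaction *action;
 *
 *	for_each_action_of_desc(desc, action)
 *		pr_info("irq %u: action %s\n",
 *			irq_desc_get_irq(desc), action->name);
 */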
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}

static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}

static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}

static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
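/*
 * Illustrative sketch (assumption): the usual pattern around these helpers
 * looks like
 *
 *	unsigned long flags;
 *	struct irq_desc *desc;
 *
 *	desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 *	if (!desc)
 *		return -EINVAL;
 *	... operate on desc with desc->lock and the chip bus lock held ...
 *	irq_put_desc_busunlock(desc, flags);
 */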
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline unsigned int irqd_get(struct irq_data *d)
{
	return __irqd_to_state(d);
}
/*
 * Manipulation functions for irq_data.state
 */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_set_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clr_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) &= ~mask;
}

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
	return __irqd_to_state(d) & mask;
}

static inline void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
#undef __irqd_to_state

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(*desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline int irq_desc_get_node(struct irq_desc *desc)
{
	return irq_common_data_get_node(&desc->irq_common_data);
}

static inline int irq_desc_is_chained(struct irq_desc *desc)
{
	return (desc->action && desc->action == &chained_action);
}
#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
#ifdef CONFIG_IRQ_TIMINGS

#define IRQ_TIMINGS_SHIFT	5
#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)

/**
 * struct irq_timings - irq timings storing structure
 * @values: a circular buffer of u64 encoded <timestamp,irq> values
 * @count: the number of elements in the array
 */
struct irq_timings {
	u64	values[IRQ_TIMINGS_SIZE];
	int	count;
};
DECLARE_PER_CPU(struct irq_timings, irq_timings);

extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);

static inline void irq_remove_timings(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_TIMINGS;

	irq_timings_free(irq_desc_get_irq(desc));
}
static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
{
	int irq = irq_desc_get_irq(desc);
	int ret;

	/*
	 * We don't need the measurement because the idle code already
	 * knows the next expiry event.
	 */
	if (act->flags & __IRQF_TIMER)
		return;

	/*
	 * In case the timing allocation fails, we just want to warn,
	 * not fail, so letting the system boot anyway.
	 */
	ret = irq_timings_alloc(irq);
	if (ret) {
		pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
			irq, ret);
		return;
	}

	desc->istate |= IRQS_TIMINGS;
}
extern void irq_timings_enable(void);
extern void irq_timings_disable(void);

DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);
/*
 * The interrupt number and the timestamp are encoded into a single
 * u64 variable to optimize the size.
 * A 48 bit timestamp and a 16 bit IRQ number are more than sufficient:
 * who cares about an IRQ after 78 hours of idle time?
 */
static inline u64 irq_timing_encode(u64 timestamp, int irq)
{
	return (timestamp << 16) | irq;
}

static inline int irq_timing_decode(u64 value, u64 *timestamp)
{
	*timestamp = value >> 16;
	return value & U16_MAX;
}
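/*
 * Worked example (illustrative, assumed values): with a timestamp of
 * 0x0000123456789abc and irq 42 (0x2a),
 *
 *	u64 v = irq_timing_encode(0x0000123456789abcULL, 42);
 *	// v == 0x123456789abc002a: timestamp in bits 63..16, irq in bits 15..0
 *
 *	u64 ts;
 *	int irq = irq_timing_decode(v, &ts);
 *	// ts == 0x0000123456789abc, irq == 42
 */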
/*
 * The function record_irq_time is only called in one place in the
 * interrupt handling path. We want this function always inlined so the
 * code inside is embedded in the caller and the static key branching
 * code can act at the higher level. Without the explicit
 * __always_inline we can end up with a function call and a small
 * overhead in the hotpath for nothing.
 */
static __always_inline
void record_irq_time(struct irq_desc *desc)
{
	if (!static_branch_likely(&irq_timing_enabled))
		return;

	if (desc->istate & IRQS_TIMINGS) {
		struct irq_timings *timings = this_cpu_ptr(&irq_timings);

		timings->values[timings->count & IRQ_TIMINGS_MASK] =
			irq_timing_encode(local_clock(),
					  irq_desc_get_irq(desc));

		timings->count++;
	}
}
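/*
 * Sketch of how the static key is expected to be toggled (assumption based
 * on the declarations above; the real control path lives elsewhere in
 * kernel/irq/):
 *
 *	irq_timings_enable();	// e.g. static_branch_enable(&irq_timing_enabled)
 *	...
 *	irq_timings_disable();	// e.g. static_branch_disable(&irq_timing_enabled)
 *
 * While the key is disabled, record_irq_time() reduces to a patched-out
 * branch in the hot path.
 */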
#else
static inline void irq_remove_timings(struct irq_desc *desc) {}
static inline void irq_setup_timings(struct irq_desc *desc,
				     struct irqaction *act) {}
static inline void record_irq_time(struct irq_desc *desc) {}
#endif /* CONFIG_IRQ_TIMINGS */
#ifdef CONFIG_GENERIC_IRQ_CHIP
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler);
#else
static inline void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return desc->pending_mask;
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return NULL;
}
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
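/*
 * Illustrative sketch (assumption, mirroring how a setaffinity path could
 * use these helpers): when the target cannot be changed from the current
 * context, the new mask is parked in pending_mask and applied later from
 * the interrupt path:
 *
 *	if (irq_can_move_pcntxt(data)) {
 *		ret = irq_do_set_affinity(data, dest, force);
 *	} else {
 *		irqd_set_move_pending(data);
 *		irq_copy_pending(desc, dest);
 *	}
 */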
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
{
	irqd_set_activated(data);
	return 0;
}
static inline void irq_domain_deactivate_irq(struct irq_data *data)
{
	irqd_clr_activated(data);
}
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
	kfree(desc->dev_name);
}
void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */