/* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef LINUX_HARDIRQ_H
3 #define LINUX_HARDIRQ_H
5 #include <linux/context_tracking_state.h>
6 #include <linux/preempt.h>
7 #include <linux/lockdep.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/sched.h>
10 #include <linux/vtime.h>
11 #include <asm/hardirq.h>
13 extern void synchronize_irq(unsigned int irq);
14 extern bool synchronize_hardirq(unsigned int irq);
/*
 * The tick check only exists on NO_HZ_FULL kernels; otherwise provide
 * an empty inline stub so callers need no #ifdef of their own.
 */
#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif
22 static __always_inline void rcu_irq_enter_check_tick(void)
24 if (context_tracking_enabled())
25 __rcu_irq_enter_check_tick();
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
/*
 * Enter hardirq context: raise the HARDIRQ section of preempt_count,
 * notify lockdep, and start hardirq time accounting for 'current'.
 */
#define __irq_enter()					\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
		account_hardirq_enter(current);		\
	} while (0)
/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw()				\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)
/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);

/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);
/*
 * Exit irq context without processing softirqs:
 * stop time accounting, notify lockdep, then drop the HARDIRQ count
 * (the exact reverse order of __irq_enter()).
 */
#define __irq_exit()					\
	do {						\
		account_hardirq_exit(current);		\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
/*
 * Like __irq_exit() without time accounting
 * (pairs with __irq_enter_raw()).
 */
#define __irq_exit_raw()				\
	do {						\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);
/*
 * Architecture hooks run at NMI entry/exit; default to no-ops when the
 * arch does not provide them.
 */
#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif
/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 *
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
/*
 * Raw NMI entry: lockdep off, arch hook, then raise both the NMI and
 * HARDIRQ sections of preempt_count so in_nmi() becomes true.
 * BUG_ON fires when every NMI nesting bit is already set (see NMI_BITS).
 */
#define __nmi_enter()						\
	do {							\
		lockdep_off();					\
		arch_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
	} while (0)
/*
 * Full NMI entry: __nmi_enter() first so in_nmi() is true before any
 * instrumentable code runs; ftrace notification is bracketed by
 * instrumentation_begin()/end().
 */
#define nmi_enter()						\
	do {							\
		__nmi_enter();					\
		lockdep_hardirq_enter();			\
		ct_nmi_enter();					\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)
/*
 * Raw NMI exit: exact reverse of __nmi_enter(). BUG_ON catches an
 * unbalanced exit (leaving NMI context we never entered).
 */
#define __nmi_exit()						\
	do {							\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
		arch_nmi_exit();				\
		lockdep_on();					\
	} while (0)
133 #define nmi_exit() \
134 do { \
135 instrumentation_begin(); \
136 ftrace_nmi_exit(); \
137 instrumentation_end(); \
138 ct_nmi_exit(); \
139 lockdep_hardirq_exit(); \
140 __nmi_exit(); \
141 } while (0)
143 #endif /* LINUX_HARDIRQ_H */