/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
extern int tick_cpu_dying(unsigned int cpu);
extern void tick_assert_timekeeping_handover(void);
#else
#define tick_cpu_dying	NULL
static inline void tick_assert_timekeeping_handover(void) { }
#endif

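/*
 * Tick handling across system suspend: the suspend-to-idle path calls
 * tick_freeze() on each CPU on the way down and tick_unfreeze() on the
 * way back up.
 */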
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif

#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu() (0)
# endif
#else
static inline void tick_irq_enter(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif

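/*
 * Broadcast modes and oneshot states: passed to tick_broadcast_control() and
 * tick_broadcast_oneshot_control() by CPUs whose per-CPU clock event device
 * may stop in deep idle states and therefore need the broadcast device to
 * take over.
 */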
enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

extern struct static_key_false arch_needs_tick_broadcast;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}

static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}

static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}

static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}

static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}

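/*
 * Tick dependency bits: reasons that force the tick to keep running on an
 * otherwise tickless (nohz_full) CPU. Each bit has a matching
 * TICK_DEP_MASK_* value and is manipulated through the tick_dep_*()
 * helpers further down.
 */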
enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
	TICK_DEP_BIT_RCU		= 4,
	TICK_DEP_BIT_RCU_EXP		= 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP

#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP		(1 << TICK_DEP_BIT_RCU_EXP)

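/*
 * Idle dynticks (CONFIG_NO_HZ_COMMON) interface: used by the idle loop and
 * by cpuidle governors to stop/restart the tick and to query idle residency.
 * The stubs in the #else branch keep callers building without NO_HZ.
 */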
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
extern void tick_nohz_idle_retain_tick(void);
extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
static inline void tick_nohz_idle_retain_tick(void) { }
static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }

static inline ktime_t tick_nohz_get_next_hrtimer(void)
{
	/* Next wake up is the tick period, assume it starts now */
	return ktime_add(ktime_get(), TICK_NSEC);
}

static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	*delta_next = TICK_NSEC;
	return *delta_next;
}

static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */

/*
 * Mask of CPUs that are nohz_full.
 *
 * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu()
 * check.
 */
extern cpumask_var_t tick_nohz_full_mask;

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_enabled())
		return false;

	return tick_nohz_full_running;
}

/*
 * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
 * the cpu expression (typically smp_processor_id()) _after_ the static
 * key check.
 */
#define tick_nohz_full_cpu(_cpu) ({					\
	bool __ret = false;						\
	if (tick_nohz_full_enabled())					\
		__ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask);	\
	__ret;								\
})

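/*
 * Illustrative use (hypothetical caller, not part of this header): the cpu
 * expression can be passed directly, since it is only evaluated after the
 * static key check succeeds, e.g.
 *
 *	if (tick_nohz_full_cpu(smp_processor_id()))
 *		pr_debug("running on a nohz_full CPU\n");
 */
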
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
	if (tick_nohz_full_enabled())
		cpumask_or(mask, mask, tick_nohz_full_mask);
}

extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);
extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);

/*
 * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
 * on top of static keys.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}

static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}

static inline void tick_dep_init_task(struct task_struct *tsk)
{
	atomic_set(&tsk->tick_dep_mask, 0);
}

static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(tsk, bit);
}

static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}

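/*
 * Illustrative use of the wrappers above (hypothetical caller, not part of
 * this header): a subsystem with pending per-CPU work can pin the tick on a
 * nohz_full CPU and release it again once done, e.g.
 *
 *	tick_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	...
 *	tick_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *
 * When nohz_full is not in use, both calls fall back to the static key
 * check above (or to empty stubs without CONFIG_NO_HZ_FULL).
 */
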
extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }

static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_init_task(struct task_struct *tsk) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
#endif /* !CONFIG_NO_HZ_FULL */

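/*
 * Re-evaluate the tick dependency when the current task changes on a
 * nohz_full CPU; a no-op when nohz_full is disabled.
 */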
static inline void tick_nohz_task_switch(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch();
}

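/*
 * Flush any deferred RCU nocb wakeups on nohz_full CPUs before the CPU
 * enters user mode and the tick may be stopped.
 */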
static inline void tick_nohz_user_enter_prepare(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		rcu_nocb_flush_deferred_wakeup();
}

#endif /* _LINUX_TICK_H */