// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);

/*
 * Tick next event: keeps track of the tick time. It's updated by the
 * CPU which handles the tick and protected by jiffies_lock. There is
 * no requirement to write-hold the jiffies seqcount for it.
 */
ktime_t tick_next_period;

/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existing CPU. So the next cpu which looks
 *    at it will take over and keep the timekeeping alive. The handover
 *    procedure also covers cpu hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
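
/*
 * TICK_DO_TIMER_BOOT marks the duty as still unclaimed; the first CPU
 * which sets up a tick device claims it in tick_setup_device().
 */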
#ifdef CONFIG_NO_HZ_FULL
/*
 * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
 * tick_do_timer_cpu and it should be taken over by an eligible secondary
 * when one comes online.
 */
static int tick_do_timer_boot_cpu __read_mostly = -1;
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
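	/*
	 * CLOCK_EVT_FEAT_C3STOP marks a device which stops in deep idle
	 * states; such a device can only run oneshot mode if the broadcast
	 * device can take over while the CPU sleeps.
	 */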
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		raw_spin_lock(&jiffies_lock);
		write_seqcount_begin(&jiffies_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);

		do_timer(1);
		write_seqcount_end(&jiffies_seq);
		raw_spin_unlock(&jiffies_lock);
		update_wall_time();
	}
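
	/*
	 * The per-CPU part runs on every CPU, not only on the do_timer()
	 * owner: process accounting, local timers and profiling.
	 */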
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add_ns(next, TICK_NSEC);
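
		/*
		 * clockevents_program_event() only fails when the requested
		 * expiry is already in the past; handle the missed tick
		 * below and retry with the next period.
		 */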
		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned int seq;
		ktime_t next;

		do {
			seq = read_seqcount_begin(&jiffies_seq);
			next = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
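
		/*
		 * Program the first expiry; if it already lies in the
		 * past, advance period by period until one sticks.
		 */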
		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add_ns(next, TICK_NSEC);
		}
	}
}

#ifdef CONFIG_NO_HZ_FULL
static void giveup_do_timer(void *info)
{
	int cpu = *(unsigned int *)info;

	WARN_ON(tick_do_timer_cpu != smp_processor_id());

	tick_do_timer_cpu = cpu;
}
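
/*
 * giveup_do_timer() runs on the boot CPU via smp_call_function_single()
 * with wait=1, so the duty has moved before the caller proceeds.
 */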
static void tick_take_do_timer_from_boot(void)
{
	int cpu = smp_processor_id();
	int from = tick_do_timer_boot_cpu;

	if (from >= 0 && from != cpu)
		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
}
#endif

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	void (*handler)(struct clock_event_device *) = NULL;
	ktime_t next_event = 0;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;

			tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
			/*
			 * The boot CPU may be nohz_full, in which case set
			 * tick_do_timer_boot_cpu so the first housekeeping
			 * secondary that comes up will take do_timer from
			 * us.
			 */
			if (tick_nohz_full_cpu(cpu))
				tick_do_timer_boot_cpu = cpu;

		} else if (tick_do_timer_boot_cpu != -1 &&
			   !tick_nohz_full_cpu(cpu)) {
			tick_take_do_timer_from_boot();
			tick_do_timer_boot_cpu = -1;
			WARN_ON(tick_do_timer_cpu != cpu);
#endif
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;
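
	/*
	 * For a replacement the old handler and pending expiry were saved
	 * above and are handed to the new device in oneshot mode below;
	 * the noop handler keeps stale events from the old device harmless.
	 */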

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}

static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}
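
/*
 * Decide between two devices which both qualify as per-cpu tick devices.
 * Oneshot capability outweighs a higher rating once oneshot mode is in
 * use or the current device already provides it.
 */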
static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
	return !curdev ||
	       newdev->rating > curdev->rating ||
	       !cpumask_equal(curdev->cpumask, newdev->cpumask);
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}

/*
 * Check, if the new registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;
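
	/*
	 * Pin the driver module of the new device: it must not go away
	 * while the device is installed as the tick device.
	 */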
	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the existing device, if any, by the new device. If the
	 * current device is the broadcast device, do not give it back to
	 * the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev);
}

/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state:	The target state (enter/exit)
 *
 * The system enters/leaves a state, where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id())
		tick_do_timer_cpu = cpumask_first(cpu_online_mask);
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif

/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}

/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
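
	/*
	 * If the broadcast device covers this CPU across suspend, leave
	 * reprogramming to the broadcast code; otherwise restore the
	 * previous local mode.
	 */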
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
}

/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled or from tick_freeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}

/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}

#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;

/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);
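
	/*
	 * The depth counter, serialized by tick_freeze_lock, identifies
	 * the last CPU to freeze: only that one suspends timekeeping.
	 */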
	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		system_state = SYSTEM_SUSPEND;
		sched_clock_suspend();
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}

/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		sched_clock_resume();
		system_state = SYSTEM_RUNNING;
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
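		/*
		 * This CPU may have been frozen for a long time; reset the
		 * soft-lockup watchdog before the local tick resumes.
		 */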
		touch_softlockup_watchdog();
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
#endif /* CONFIG_SUSPEND */

/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}