/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"
/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */
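/*
 * tick_broadcast_mask:	CPUs whose local tick device is emulated or
 *			wrapped by the broadcast device.
 * tick_broadcast_on:	CPUs which have requested periodic broadcast
 *			via tick_broadcast_control().
 * tmpmask:		scratch space, only valid while holding
 *			tick_broadcast_lock.
 */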
static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return tick_broadcast_mask;
}
/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}
/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
                                        struct clock_event_device *newdev)
{
        if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
            (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
            (newdev->features & CLOCK_EVT_FEAT_C3STOP))
                return false;

        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
            !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
                return false;

        return !curdev || newdev->rating > curdev->rating;
}
/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
        struct clock_event_device *cur = tick_broadcast_device.evtdev;

        if (!tick_check_broadcast_device(cur, dev))
                return;

        if (!try_module_get(dev->owner))
                return;

        clockevents_exchange_device(cur, dev);
        if (cur)
                cur->event_handler = clockevents_handle_noop;
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_broadcast_mask))
                tick_broadcast_start_periodic(dev);
        /*
         * Inform all cpus about this. We might be in a situation
         * where we did not switch to oneshot mode because the per cpu
         * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
         * of a oneshot capable broadcast device. Without that
         * notification the system stays stuck in periodic mode
         * forever.
         */
        if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_clock_notify();
}
/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}
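/*
 * Update the frequency of the broadcast device. Returns -ENODEV when
 * @dev is not the installed broadcast device.
 */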
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
        int ret = -ENODEV;

        if (tick_is_broadcast_device(dev)) {
                raw_spin_lock(&tick_broadcast_lock);
                ret = __clockevents_update_freq(dev, freq);
                raw_spin_unlock(&tick_broadcast_lock);
        }
        return ret;
}
static void err_broadcast(const struct cpumask *mask)
{
        pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
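/*
 * Set the broadcast function for a tick device which depends on
 * broadcasting. tick_broadcast resolves to NULL when the architecture
 * provides no generic broadcast implementation; in that case warn once
 * and fall back to err_broadcast() so the failure is at least reported.
 */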
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
        if (!dev->broadcast)
                dev->broadcast = tick_broadcast;
        if (!dev->broadcast) {
                pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
                             dev->name);
                dev->broadcast = err_broadcast;
        }
}
/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                tick_device_setup_broadcast_func(dev);
                cpumask_set_cpu(cpu, tick_broadcast_mask);
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
                ret = 1;
        } else {
                /*
                 * Clear the broadcast bit for this cpu if the
                 * device is not power state affected.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                        cpumask_clear_cpu(cpu, tick_broadcast_mask);
                else
                        tick_device_setup_broadcast_func(dev);

                /*
                 * Clear the broadcast bit if the CPU is not in
                 * periodic broadcast on state.
                 */
                if (!cpumask_test_cpu(cpu, tick_broadcast_on))
                        cpumask_clear_cpu(cpu, tick_broadcast_mask);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_ONESHOT:
                        /*
                         * If the system is in oneshot mode we can
                         * unconditionally clear the oneshot mask bit,
                         * because the CPU is running and therefore
                         * not in an idle state which causes the power
                         * state affected device to stop. Let the
                         * caller initialize the device.
                         */
                        tick_broadcast_clear_oneshot(cpu);
                        ret = 0;
                        break;

                case TICKDEV_MODE_PERIODIC:
                        /*
                         * If the system is in periodic mode, check
                         * whether the broadcast device can be
                         * switched off now.
                         */
                        if (cpumask_empty(tick_broadcast_mask) && bc)
                                clockevents_shutdown(bc);
                        /*
                         * If we kept the cpu in the broadcast mask,
                         * tell the caller to leave the per cpu device
                         * in shutdown state. The periodic interrupt
                         * is delivered by the broadcast device, if
                         * the broadcast device exists and is not
                         * hrtimer based.
                         */
                        if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
                                ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
                        break;
                default:
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

        return ret;
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        struct clock_event_device *evt = td->evtdev;

        if (!evt)
                return -ENODEV;

        if (!evt->event_handler)
                return -EINVAL;

        evt->event_handler(evt);
        return 0;
}
#endif
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;
        bool local = false;

        /*
         * Check, if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                struct clock_event_device *bc = tick_broadcast_device.evtdev;

                cpumask_clear_cpu(cpu, mask);
                /*
                 * We only run the local handler, if the broadcast
                 * device is not hrtimer based. Otherwise we run into
                 * a hrtimer recursion.
                 *
                 * local timer_interrupt()
                 *   local_handler()
                 *     expire_hrtimers()
                 *       bc_handler()
                 *         local_handler()
                 *           expire_hrtimers()
                 */
                local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use the
                 * one of the first device. This works as long as we have this
                 * misfeature only on x86 (lapic)
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
        return local;
}
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
        cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
        return tick_do_broadcast(tmpmask);
}
/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        bool bc_local;

        raw_spin_lock(&tick_broadcast_lock);

        /* Handle spurious interrupts gracefully */
        if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
                raw_spin_unlock(&tick_broadcast_lock);
                return;
        }

        bc_local = tick_do_periodic_broadcast();

        if (clockevent_state_oneshot(dev)) {
                ktime_t next = ktime_add(dev->next_event, tick_period);

                clockevents_program_event(dev, next, true);
        }
        raw_spin_unlock(&tick_broadcast_lock);

        /*
         * We run the handler of the local cpu after dropping
         * tick_broadcast_lock because the handler might deadlock when
         * trying to switch to oneshot mode.
         */
        if (bc_local)
                td->evtdev->event_handler(td->evtdev);
}
/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        int cpu, bc_stopped;
        unsigned long flags;

        /* Protects also the local clockevent device. */
        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        td = this_cpu_ptr(&tick_cpu_device);
        dev = td->evtdev;

        /*
         * Is the device not affected by the powerstate ?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (!tick_device_is_functional(dev))
                goto out;

        cpu = smp_processor_id();
        bc = tick_broadcast_device.evtdev;
        bc_stopped = cpumask_empty(tick_broadcast_mask);

        switch (mode) {
        case TICK_BROADCAST_FORCE:
                tick_broadcast_forced = 1;
                /* fall through */
        case TICK_BROADCAST_ON:
                cpumask_set_cpu(cpu, tick_broadcast_on);
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
                        /*
                         * Only shutdown the cpu local device, if:
                         *
                         * - the broadcast device exists
                         * - the broadcast device is not a hrtimer based one
                         * - the broadcast device is in periodic mode to
                         *   avoid a hiccup during switch to oneshot mode
                         */
                        if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
                            tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                break;

        case TICK_BROADCAST_OFF:
                if (tick_broadcast_forced)
                        break;
                cpumask_clear_cpu(cpu, tick_broadcast_on);
                if (!tick_device_is_functional(dev))
                        break;
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

        if (bc) {
                if (cpumask_empty(tick_broadcast_mask)) {
                        if (!bc_stopped)
                                clockevents_shutdown(bc);
                } else if (bc_stopped) {
                        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                                tick_broadcast_start_periodic(bc);
                        else
                                tick_broadcast_setup_oneshot(bc);
                }
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);
/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpumask_clear_cpu(cpu, tick_broadcast_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_on);

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_broadcast_mask))
                        clockevents_shutdown(bc);
        }

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif
void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
                return false;
        else
                return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}
void tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_tick_resume(bc);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_broadcast_mask))
                                tick_broadcast_start_periodic(bc);
                        break;
                case TICKDEV_MODE_ONESHOT:
                        if (!cpumask_empty(tick_broadcast_mask))
                                tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;
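/*
 * tick_broadcast_oneshot_mask:	CPUs which went deep idle and rely on
 *				the broadcast device for their wakeup.
 * tick_broadcast_pending_mask:	CPUs whose expired event will be
 *				delivered by the broadcast IPI, so they
 *				must not reprogram the local timer on
 *				idle exit.
 * tick_broadcast_force_mask:	CPUs which get the IPI even though
 *				their local event has already expired.
 */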
/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return tick_broadcast_oneshot_mask;
}
/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
        return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}
/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
                                        const struct cpumask *cpumask)
{
        if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
                return;

        if (cpumask_equal(bc->cpumask, cpumask))
                return;

        bc->cpumask = cpumask;
        irq_set_affinity(bc->irq, bc->cpumask);
}
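/*
 * Program the next broadcast expiry and, when the device supports
 * CLOCK_EVT_FEAT_DYNIRQ, steer the broadcast interrupt to the CPU
 * which expires first.
 */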
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
                                     ktime_t expires)
{
        if (!clockevent_state_oneshot(bc))
                clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

        clockevents_program_event(bc, expires, 1);
        tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}
/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
        if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
                struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

                /*
                 * We might be in the middle of switching over from
                 * periodic to oneshot. If the CPU has not yet
                 * switched over, leave the device alone.
                 */
                if (td->mode == TICKDEV_MODE_ONESHOT) {
                        clockevents_switch_state(td->evtdev,
                                                 CLOCK_EVT_STATE_ONESHOT);
                }
        }
}
/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu, next_cpu = 0;
        bool bc_local;

        raw_spin_lock(&tick_broadcast_lock);
        dev->next_event = KTIME_MAX;
        next_event = KTIME_MAX;
        cpumask_clear(tmpmask);
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event <= now) {
                        cpumask_set_cpu(cpu, tmpmask);
                        /*
                         * Mark the remote cpu in the pending mask, so
                         * it can avoid reprogramming the cpu local
                         * timer in tick_broadcast_oneshot_control().
                         */
                        cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
                } else if (td->evtdev->next_event < next_event) {
                        next_event = td->evtdev->next_event;
                        next_cpu = cpu;
                }
        }

        /*
         * Remove the current cpu from the pending mask. The event is
         * delivered immediately in tick_do_broadcast() !
         */
        cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

        /* Take care of enforced broadcast requests */
        cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
        cpumask_clear(tick_broadcast_force_mask);

        /*
         * Sanity check. Catch the case where we try to broadcast to
         * offline cpus.
         */
        if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
                cpumask_and(tmpmask, tmpmask, cpu_online_mask);

        /*
         * Wakeup the cpus which have an expired event.
         */
        bc_local = tick_do_broadcast(tmpmask);

        /*
         * Two reasons for reprogram:
         *
         * - The global event did not expire any CPU local
         * events. This happens in dyntick mode, as the maximum PIT
         * delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         * in the event mask
         */
        if (next_event != KTIME_MAX)
                tick_broadcast_set_event(dev, next_cpu, next_event);

        raw_spin_unlock(&tick_broadcast_lock);

        if (bc_local) {
                td = this_cpu_ptr(&tick_cpu_device);
                td->evtdev->event_handler(td->evtdev);
        }
}
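/*
 * Returns -EBUSY when @cpu is the one the armed hrtimer based
 * broadcast device is bound to, as that CPU must stay out of deep
 * idle to service the broadcast; 0 otherwise.
 */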
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
        if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
                return 0;
        if (bc->next_event == KTIME_MAX)
                return 0;
        return bc->bound_on == cpu ? -EBUSY : 0;
}
static void broadcast_shutdown_local(struct clock_event_device *bc,
                                     struct clock_event_device *dev)
{
        /*
         * For hrtimer based broadcasting we cannot shutdown the cpu
         * local device if our own event is the first one to expire or
         * if we own the broadcast timer.
         */
        if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
                if (broadcast_needs_cpu(bc, smp_processor_id()))
                        return;
                if (dev->next_event < bc->next_event)
                        return;
        }
        clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
        struct clock_event_device *bc, *dev;
        int cpu, ret = 0;
        ktime_t now;

        /*
         * If there is no broadcast device, tell the caller not to go
         * into deep idle.
         */
        if (!tick_broadcast_device.evtdev)
                return -EBUSY;

        dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

        raw_spin_lock(&tick_broadcast_lock);
        bc = tick_broadcast_device.evtdev;
        cpu = smp_processor_id();

        if (state == TICK_BROADCAST_ENTER) {
                /*
                 * If the current CPU owns the hrtimer broadcast
                 * mechanism, it cannot go deep idle and we do not add
                 * the CPU to the broadcast mask. We don't have to go
                 * through the EXIT path as the local timer is not
                 * shutdown.
                 */
                ret = broadcast_needs_cpu(bc, cpu);
                if (ret)
                        goto out;

                /*
                 * If the broadcast device is in periodic mode, we
                 * return.
                 */
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                        /* If it is a hrtimer based broadcast, return busy */
                        if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
                                ret = -EBUSY;
                        goto out;
                }

                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

                        /* Conditionally shut down the local timer. */
                        broadcast_shutdown_local(bc, dev);

                        /*
                         * We only reprogram the broadcast timer if we
                         * did not mark ourself in the force mask and
                         * if the cpu local event is earlier than the
                         * broadcast event. If the current CPU is in
                         * the force mask, then we are going to be
                         * woken by the IPI right away; we return
                         * busy, so the CPU does not try to go deep
                         * idle.
                         */
                        if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
                                ret = -EBUSY;
                        } else if (dev->next_event < bc->next_event) {
                                tick_broadcast_set_event(bc, cpu, dev->next_event);
                                /*
                                 * In case of hrtimer broadcasts the
                                 * programming might have moved the
                                 * timer to this cpu. If yes, remove
                                 * us from the broadcast mask and
                                 * return busy.
                                 */
                                ret = broadcast_needs_cpu(bc, cpu);
                                if (ret) {
                                        cpumask_clear_cpu(cpu,
                                                tick_broadcast_oneshot_mask);
                                }
                        }
                }
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
                        /*
                         * The cpu which was handling the broadcast
                         * timer marked this cpu in the broadcast
                         * pending mask and fired the broadcast
                         * IPI. So we are going to handle the expired
                         * event anyway via the broadcast IPI
                         * handler. No need to reprogram the timer
                         * with an already expired event.
                         */
                        if (cpumask_test_and_clear_cpu(cpu,
                                       tick_broadcast_pending_mask))
                                goto out;

                        /*
                         * Bail out if there is no next event.
                         */
                        if (dev->next_event == KTIME_MAX)
                                goto out;
                        /*
                         * If the pending bit is not set, then we are
                         * either the CPU handling the broadcast
                         * interrupt or we got woken by something else.
                         *
                         * We are no longer in the broadcast mask, so
                         * if the cpu local expiry time is already
                         * reached, we would reprogram the cpu local
                         * timer with an already expired event.
                         *
                         * This can lead to a ping-pong when we return
                         * to idle and therefore rearm the broadcast
                         * timer before the cpu local timer was able
                         * to fire. This happens because the forced
                         * reprogramming makes sure that the event
                         * will happen in the future and depending on
                         * the min_delta setting this might be far
                         * enough out that the ping-pong starts.
                         *
                         * If the cpu local next_event has expired
                         * then we know that the broadcast timer
                         * next_event has expired as well and
                         * broadcast is about to be handled. So we
                         * avoid reprogramming and enforce that the
                         * broadcast handler, which did not run yet,
                         * will invoke the cpu local handler.
                         *
                         * We cannot call the handler directly from
                         * here, because we might be in a NOHZ phase
                         * and we did not go through the irq_enter()
                         * nohz fixups.
                         */
                        now = ktime_get();
                        if (dev->next_event <= now) {
                                cpumask_set_cpu(cpu, tick_broadcast_force_mask);
                                goto out;
                        }
                        /*
                         * We got woken by something else. Reprogram
                         * the cpu local timer device.
                         */
                        tick_program_event(dev->next_event, 1);
                }
        }
out:
        raw_spin_unlock(&tick_broadcast_lock);
        return ret;
}
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}
static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        if (!bc)
                return;

        /* Set it up only once ! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = clockevent_state_periodic(bc);

                bc->event_handler = tick_handle_oneshot_broadcast;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(tmpmask, tick_broadcast_mask);
                cpumask_clear_cpu(cpu, tmpmask);
                cpumask_or(tick_broadcast_oneshot_mask,
                           tick_broadcast_oneshot_mask, tmpmask);

                if (was_periodic && !cpumask_empty(tmpmask)) {
                        clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
                        tick_broadcast_init_next_event(tmpmask,
                                                       tick_next_period);
                        tick_broadcast_set_event(bc, cpu, tick_next_period);
                } else
                        bc->next_event = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. So the bit is set and
                 * would prevent the first broadcast enter after this
                 * to program the bc device.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}
/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        bc = tick_broadcast_device.evtdev;

        if (bc && broadcast_needs_cpu(bc, deadcpu)) {
                /* This moves the broadcast assignment to this CPU: */
                clockevents_program_event(bc, bc->next_event, 1);
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Clear the broadcast masks for the dead cpu, but do not stop
         * the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif
/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}
#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
                return -EBUSY;

        return 0;
}
#endif /* CONFIG_TICK_ONESHOT */
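/*
 * Allocate the cpumasks. This runs once early during boot, before any
 * broadcast capable clockevent device is registered; GFP_NOWAIT because
 * sleeping is not allowed this early.
 */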
void __init tick_broadcast_init(void)
{
        zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
        zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
        zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}