/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
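/* Scratch mask; all users in this file hold tick_broadcast_lock. */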
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
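/*
 * Set by a BROADCAST_FORCE request; keeps the requesting CPU in the
 * broadcast mask even when a later BROADCAST_OFF request arrives
 * (see tick_do_broadcast_on_off()).
 */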
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return to_cpumask(tick_broadcast_mask);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
        if ((tick_broadcast_device.evtdev &&
             tick_broadcast_device.evtdev->rating >= dev->rating) ||
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;

        clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
        return 1;
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check whether the device is dysfunctional and a placeholder which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
        } else {
                /*
                 * When the new device is not affected by the stop
                 * feature and the cpu is marked in the broadcast mask
                 * then clear the broadcast bit.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
                        int cpu = smp_processor_id();

                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        tick_broadcast_clear_oneshot(cpu);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
}

/*
 * Broadcast the event to the cpus which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;

        /*
         * Check whether the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->event_handler(td->evtdev);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use the
                 * broadcast function of the first device. This works as long
                 * as we have this misfeature only on x86 (lapic).
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
        raw_spin_lock(&tick_broadcast_lock);

        cpumask_and(to_cpumask(tmpmask),
                    cpu_online_mask, tick_get_broadcast_mask());
        tick_do_broadcast(to_cpumask(tmpmask));

        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        ktime_t next;

        tick_do_periodic_broadcast();

        /*
         * The device is in periodic mode. No reprogramming necessary:
         */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                return;

        /*
         * Set up the next period for devices which do not have
         * periodic mode. We read dev->next_event first and add to it
         * when the event already expired. clockevents_program_event()
         * sets dev->next_event only when the event is really
         * programmed to the device.
         */
        for (next = dev->next_event; ;) {
                next = ktime_add(next, tick_period);

                if (!clockevents_program_event(dev, next, false))
                        return;
                tick_do_periodic_broadcast();
        }
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu, bc_stopped;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;
        bc = tick_broadcast_device.evtdev;

        /*
         * Is the device not affected by the powerstate?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (!tick_device_is_functional(dev))
                goto out;

        bc_stopped = cpumask_empty(tick_get_broadcast_mask());

        switch (*reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                        tick_broadcast_force = 1;
                break;
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
                if (!tick_broadcast_force &&
                    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

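        /*
         * Start or stop the broadcast device depending on the result:
         * shut it down when the broadcast mask became empty, start it
         * when the first CPU was added to the mask.
         */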
        if (cpumask_empty(tick_get_broadcast_mask())) {
                if (!bc_stopped)
                        clockevents_shutdown(bc);
        } else if (bc_stopped) {
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
        if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
                printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
                       "offline CPU #%d\n", *oncpu);
        else
                tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
        struct clock_event_device *bc;
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_get_broadcast_mask()))
                        clockevents_shutdown(bc);
        }

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

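/*
 * Shut the broadcast device down on suspend; it is brought back up by
 * tick_resume_broadcast().
 */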
void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

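/*
 * Restore the broadcast device on resume and restart it in the mode it
 * was operated in before suspend. Returns nonzero when the current
 * CPU's periodic tick is provided by the broadcast device.
 */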
int tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;
        int broadcast = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_get_broadcast_mask()))
                                tick_broadcast_start_periodic(bc);
                        broadcast = cpumask_test_cpu(smp_processor_id(),
                                                     tick_get_broadcast_mask());
                        break;
                case TICKDEV_MODE_ONESHOT:
                        if (!cpumask_empty(tick_get_broadcast_mask()))
                                broadcast = tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

        return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return to_cpumask(tick_broadcast_oneshot_mask);
}

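/*
 * Program the broadcast device for the given expiry time, switching it
 * to oneshot mode first if necessary.
 */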
static int tick_broadcast_set_event(ktime_t expires, int force)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

        return clockevents_program_event(bc, expires, force);
}

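/*
 * Called from tick_resume_broadcast() to put the broadcast device back
 * into oneshot mode after a resume.
 */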
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
        return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to re-enable the
 * per-cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
        if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
        }
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu;

        raw_spin_lock(&tick_broadcast_lock);
again:
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(to_cpumask(tmpmask));
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)
                        cpumask_set_cpu(cpu, to_cpumask(tmpmask));
                else if (td->evtdev->next_event.tv64 < next_event.tv64)
                        next_event.tv64 = td->evtdev->next_event.tv64;
        }

        /*
         * Wake up the cpus which have an expired event.
         */
        tick_do_broadcast(to_cpumask(tmpmask));

        /*
         * Two reasons for reprogramming:
         *
         * - The global event did not expire any CPU local
         * events. This happens in dyntick mode, as the maximum PIT
         * delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         * in the event mask
         */
        if (next_event.tv64 != KTIME_MAX) {
                /*
                 * Rearm the broadcast device. If the event expired,
                 * repeat the above.
                 */
                if (tick_broadcast_set_event(next_event, 0))
                        goto again;
        }
        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu;

        /*
         * Periodic mode does not care about the enter/exit of power
         * states
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                return;

        /*
         * We are called with preemption disabled from the depth of the
         * idle code, so we can't be moved away.
         */
        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;

        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return;

        bc = tick_broadcast_device.evtdev;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
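                        /*
                         * Pull the broadcast expiry forward if this CPU's
                         * next event is due before the currently programmed
                         * broadcast event.
                         */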
                        if (dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(dev->next_event, 1);
                }
        } else {
                if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_clear_cpu(cpu,
                                          tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
                        if (dev->next_event.tv64 != KTIME_MAX)
                                tick_program_event(dev->next_event, 1);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}

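/*
 * Preset the next_event of all per-cpu tick devices in @mask, so that
 * the oneshot broadcast handler treats them as due at @expires.
 */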
static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        /* Set it up only once! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

                bc->event_handler = tick_handle_oneshot_broadcast;

                /* Take the do_timer update */
                tick_do_timer_cpu = cpu;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
                cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
                cpumask_or(tick_get_broadcast_oneshot_mask(),
                           tick_get_broadcast_oneshot_mask(),
                           to_cpumask(tmpmask));

                if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
                        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
                        tick_broadcast_init_next_event(to_cpumask(tmpmask),
                                                       tick_next_period);
                        tick_broadcast_set_event(tick_next_period, 1);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. So the bit is set and
                 * would prevent the first broadcast enter after this
                 * from programming the bc device.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Clear the broadcast mask flag for the dead cpu, but do not
         * stop the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif