/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
        struct clock_event_device *ce;
        int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
                        bool ismax)
{
        u64 clc = (u64) latch << evt->shift;
        u64 rnd;

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }
        rnd = (u64) evt->mult - 1;

        /*
         * Upper bound sanity check. If the backwards conversion is
         * not equal latch, we know that the above shift overflowed.
         */
        if ((clc >> evt->shift) != (u64)latch)
                clc = ~0ULL;

        /*
         * Scaled math oddities:
         *
         * For mult <= (1 << shift) we can safely add mult - 1 to
         * prevent integer rounding loss. So the backwards conversion
         * from nsec to device ticks will be correct.
         *
         * For mult > (1 << shift), i.e. device frequency is > 1GHz we
         * need to be careful. Adding mult - 1 will result in a value
         * which when converted back to device ticks can be larger
         * than latch by up to (mult - 1) >> shift. For the min_delta
         * calculation we still want to apply this in order to stay
         * above the minimum device ticks limit. For the upper limit
         * we would end up with a latch value larger than the upper
         * limit of the device, so we omit the add to stay below the
         * device upper boundary.
         *
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1U << evt->shift)))
                clc += rnd;

        do_div(clc, evt->mult);

        /* Deltas less than 1usec are pointless noise */
        return clc > 1000 ? clc : 1000;
}

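/*
 * Worked example with illustrative values: a 1MHz device configured with
 * shift = 32 gets mult = (1000000 << 32) / NSEC_PER_SEC ~= 4294967, since
 * mult/shift scale nanoseconds to device ticks. Converting a latch of
 * 1000 ticks back to nanoseconds then yields (1000 << 32) / 4294967,
 * which is roughly 1000000 ns, i.e. the expected 1 ms.
 */
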
/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev: device to modify
 * @mode: new mode
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_mode(struct clock_event_device *dev,
                          enum clock_event_mode mode)
{
        if (dev->mode != mode) {
                dev->set_mode(mode, dev);
                dev->mode = mode;

                /*
                 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (mode == CLOCK_EVT_MODE_ONESHOT) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
                        }
                }
        }
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT         (NSEC_PER_SEC / HZ)

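/*
 * For example, assuming a kernel configured with HZ == 250, the limit is
 * NSEC_PER_SEC / 250 == 4000000 ns, i.e. min_delta_ns is never raised
 * beyond 4 ms.
 */
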
/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev: device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk_deferred(KERN_WARNING
                                "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk_deferred(KERN_WARNING
                        "CE: %s increased min_delta_ns to %llu nsec\n",
                        dev->name ? dev->name : "?",
                        (unsigned long long) dev->min_delta_ns);
        return 0;
}

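/*
 * Illustrative progression: starting from a min_delta_ns below 5000,
 * successive calls yield 5000, 7500, 11250, ... ns (each step adds 50%)
 * until the value is clamped to MIN_DELTA_LIMIT; the call after that
 * gives up and returns -ETIME.
 */
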
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;
        int i;

        for (i = 0;;) {
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;

                if (++i > 2) {
                        /*
                         * We tried 3 times to program the device with the
                         * given min_delta_ns. Try to increase the minimum
                         * delta, if that fails as well get out of here.
                         */
                        if (clockevents_increase_min_delta(dev))
                                return -ETIME;
                        i = 0;
                }
        }
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;

        delta = dev->min_delta_ns;
        dev->next_event = ktime_add_ns(ktime_get(), delta);

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        dev->retries++;
        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @force: program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        dev->next_event = expires;

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

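/*
 * Typical use, illustrative only: the tick/hrtimer code programs the next
 * event as an absolute expiry on the monotonic clock, e.g. one jiffy ahead:
 *
 *      ktime_t next = ktime_add_ns(ktime_get(), NSEC_PER_SEC / HZ);
 *
 *      clockevents_program_event(dev, next, false);
 *
 * With force == true an expiry that already lies in the past falls back
 * to programming the device's minimum delta instead of returning -ETIME.
 */
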
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                tick_check_new_device(dev);
        }
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;

                if (!try_module_get(dev->owner))
                        continue;

                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev) {
                tick_install_replacement(newdev);
                list_del_init(&ced->list);
        }
        return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
        /* Fast track. Device is unused */
        if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
                list_del_init(&ced->list);
                return 0;
        }

        return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
        struct ce_unbind *cu = arg;
        int res;

        raw_spin_lock(&clockevents_lock);
        res = __clockevents_try_unbind(cu->ce, smp_processor_id());
        if (res == -EAGAIN)
                res = clockevents_replace(cu->ce);
        cu->res = res;
        raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
        struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

        smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
        return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
        int ret;

        mutex_lock(&clockevents_mutex);
        ret = clockevents_unbind(ced, cpu);
        mutex_unlock(&clockevents_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32bit ticks so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
        dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

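/*
 * Worked example with illustrative numbers: a 32bit timer running at
 * 100MHz has max_delta_ticks = 0xffffffff, so sec = 0xffffffff / 100000000,
 * i.e. 42 seconds. That is below the 600 second cap, so mult/shift are
 * calculated for a 42 second range before min/max_delta_ns are derived
 * from the tick limits.
 */
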
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev: device to register
 * @freq: The clock frequency
 * @min_delta: The minimum clock ticks to program in oneshot mode
 * @max_delta: The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

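/*
 * Example of a typical caller (hypothetical driver, all names illustrative):
 * a oneshot capable timer ticking at 1MHz which can be programmed between
 * 2 and 0xffffffff ticks would be registered like this:
 *
 *      static struct clock_event_device foo_clockevent = {
 *              .name           = "foo-timer",
 *              .features       = CLOCK_EVT_FEAT_ONESHOT,
 *              .set_mode       = foo_set_mode,
 *              .set_next_event = foo_set_next_event,
 *      };
 *
 *      foo_clockevent.cpumask = cpumask_of(smp_processor_id());
 *      clockevents_config_and_register(&foo_clockevent, 1000000, 2, 0xffffffff);
 */
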
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
                return clockevents_program_event(dev, dev->next_event, false);

        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev);

        return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev: device to modify
 * @freq: new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        local_irq_restore(flags);
        return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        unsigned long flags;

        local_irq_save(flags);
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                module_put(old->owner);
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
                clockevents_shutdown(new);
        }
        local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend)
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume)
                        dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * Returns 0 on success, any other value on error
 */
int clockevents_notify(unsigned long reason, void *arg)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;
        int cpu, ret = 0;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        switch (reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                tick_broadcast_on_off(reason, arg);
                break;

        case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
        case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
                ret = tick_broadcast_oneshot_control(reason);
                break;

        case CLOCK_EVT_NOTIFY_CPU_DYING:
                tick_handover_do_timer(arg);
                break;

        case CLOCK_EVT_NOTIFY_SUSPEND:
                tick_suspend();
                tick_suspend_broadcast();
                break;

        case CLOCK_EVT_NOTIFY_RESUME:
                tick_resume();
                break;

        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                tick_shutdown_broadcast_oneshot(arg);
                tick_shutdown_broadcast(arg);
                tick_shutdown(arg);
                /*
                 * Unregister the clock event devices which were
                 * released from the users in the notify chain.
                 */
                list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                        list_del(&dev->list);
                /*
                 * Now check whether the CPU has left unused per cpu devices
                 */
                cpu = *((int *)arg);
                list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                        if (cpumask_test_cpu(cpu, dev->cpumask) &&
                            cpumask_weight(dev->cpumask) == 1 &&
                            !tick_is_broadcast_device(dev)) {
                                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                                list_del(&dev->list);
                        }
                }
                break;
        default:
                break;
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_notify);

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
        .name           = "clockevents",
        .dev_name       = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct tick_device *td;
        ssize_t count = 0;

        raw_spin_lock_irq(&clockevents_lock);
        td = tick_get_tick_dev(dev);
        if (td && td->evtdev)
                count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
        raw_spin_unlock_irq(&clockevents_lock);
        return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        char name[CS_NAME_LEN];
        ssize_t ret = sysfs_get_uname(buf, name, count);
        struct clock_event_device *ce;

        if (ret < 0)
                return ret;

        ret = -ENODEV;
        mutex_lock(&clockevents_mutex);
        raw_spin_lock_irq(&clockevents_lock);
        list_for_each_entry(ce, &clockevent_devices, list) {
                if (!strcmp(ce->name, name)) {
                        ret = __clockevents_try_unbind(ce, dev->id);
                        break;
                }
        }
        raw_spin_unlock_irq(&clockevents_lock);
        /*
         * We hold clockevents_mutex, so ce can't go away
         */
        if (ret == -EAGAIN)
                ret = clockevents_unbind(ce, dev->id);
        mutex_unlock(&clockevents_mutex);
        return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

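/*
 * Example usage from user space, illustrative and assuming the sysfs
 * layout created below (one clockeventN node per possible cpu):
 *
 *      # cat /sys/devices/system/clockevents/clockevent0/current_device
 *      # echo <device name> > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The unbind only succeeds if the named device is unused or a suitable
 * replacement tick device can be installed.
 */
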
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
        .init_name      = "broadcast",
        .id             = 0,
        .bus            = &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return dev == &tick_bc_dev ? tick_get_broadcast_device() :
                &per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
        int err = device_register(&tick_bc_dev);

        if (!err)
                err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
        return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device *dev = &per_cpu(tick_percpu_dev, cpu);
                int err;

                dev->id = cpu;
                dev->bus = &clockevents_subsys;
                err = device_register(dev);
                if (!err)
                        err = device_create_file(dev, &dev_attr_current_device);
                if (!err)
                        err = device_create_file(dev, &dev_attr_unbind_device);
                if (err)
                        return err;
        }

        return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
        int err = subsys_system_register(&clockevents_subsys, NULL);

        if (!err)
                err = tick_init_sysfs();
        return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */

#endif /* GENERIC_CLOCK_EVENTS */