/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"
/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
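
/*
 * Illustrative example (added note, not part of the original file): a
 * hypothetical driver with a 24 MHz free-running counter that wants a
 * cycles->ns conversion valid for at least 600 seconds could do:
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 24000000, NSEC_PER_SEC, 600);
 *
 * and then convert with ns = ((u64)cycles * mult) >> shift, which is
 * guaranteed not to overflow 64 bits for deltas of up to 600 seconds.
 * The frequency and range above are made up for the example.
 */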
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}
static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	return select;
}
static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}
#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}
/**
 * clocksource_max_adjustment- Returns max adjustment amount
 * @cs:		Pointer to clocksource
 *
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
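
/*
 * Illustrative arithmetic (added note, not part of the original file): a
 * clocksource whose mult is 10000000 gets maxadj == 1100000, i.e. the
 * adjustment code may move mult by at most +/-11% around its nominal value.
 */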
/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult + maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
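
/*
 * Worked example (added note, not part of the original file): for a 32-bit
 * counter running at roughly 20 MHz, mask caps max_cycles at 2^32 - 1, which
 * is about 214 seconds worth of cycles; after the 50% safety margin the
 * limit reported to the caller is on the order of 107 seconds.
 */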
/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:		Pointer to clocksource to be updated
 *
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}
static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			pr_warn("Override clocksource %s is not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
				cs->name);
			override_name[0] = 0;
		} else {
			/* Override clocksource can be used. */
			best = cs;
		}
		break;
	}

	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }
#endif
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place, where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}
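
/*
 * Example of the resulting order (added note, not part of the original file):
 * with typical x86 ratings this keeps the list as tsc (300), hpet (250),
 * acpi_pm (200), ..., jiffies (1), so clocksource_find_best() can simply
 * walk it front to back and take the first suitable entry.
 */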
/**
 * __clocksource_update_freq_scale - Used update clocksource with new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
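
/*
 * Illustrative sketch (added, not part of the original file): a driver whose
 * counter frequency is only known once the hardware is enabled might refresh
 * mult/shift from its ->enable() callback via the hz helper. The names
 * example_enable() and example_measure_rate_hz() are hypothetical.
 *
 *	static int example_enable(struct clocksource *cs)
 *	{
 *		__clocksource_update_freq_hz(cs, example_measure_rate_hz());
 *		return 0;
 *	}
 */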
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	clocksource_select_watchdog(false);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
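
/*
 * Illustrative sketch of a caller (added, not part of the original file):
 * a platform driver with a hypothetical 32-bit, 19.2 MHz free-running
 * counter would normally go through the clocksource_register_hz() helper,
 * which wraps this function with scale == 1:
 *
 *	static cycle_t example_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)readl_relaxed(example_counter_base);
 *	}
 *
 *	static struct clocksource example_cs = {
 *		.name	= "example-timer",
 *		.rating	= 300,
 *		.read	= example_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&example_cs, 19200000);
 *
 * example_read() and example_counter_base are made up for the example.
 */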
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	clocksource_select();
	clocksource_select_watchdog(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
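
/*
 * Usage note (added, not part of the original file): a caller that no longer
 * trusts a clocksource it registered can demote it; rating 0 effectively
 * takes it out of the running:
 *
 *	clocksource_change_rating(&example_cs, 0);
 *
 * example_cs stands in for whatever clocksource the caller owns.
 */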
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	return 0;
}
/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);
#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}
ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}
/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
/**
 * sysfs_unbind_clocksource - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of clocksource to unbind
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t sysfs_unbind_clocksource(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
/*
 * Sysfs setup bits:
 */
static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource);

static DEVICE_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
};
static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_current_clocksource);
	if (!error)
		error = device_create_file(&device_clocksource,
					   &dev_attr_unbind_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
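
/*
 * Usage note (added, not part of the original file): the attributes above
 * show up under /sys/devices/system/clocksource/clocksource0/, e.g.:
 *
 *	cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * "hpet" is just an example; any name listed in available_clocksource works.
 */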
#endif /* CONFIG_SYSFS */
/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
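
/*
 * Usage note (added, not part of the original file): the override can be set
 * from the kernel command line, e.g. booting with "clocksource=hpet"; the
 * name must match one of the registered clocksources.
 */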
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);