/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

static struct timekeeper timekeeper;
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static seqcount_t timekeeper_seq;
static struct timekeeper shadow_timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
	struct timespec tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
	/* Verify consistency before modifying */
	WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

	tk->total_sleep_time	= t;
	tk->offs_boot		= timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->clock;
	tk->clock = clock;
	tk->cycle_last = clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->xtime_nsec >>= -shift_change;
		else
			tk->xtime_nsec <<= shift_change;
	}
	tk->shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->mult = clock->mult;
}

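/*
 * Worked example (hypothetical clocksource, not taken from this file):
 * with mult = 1 << 22 and shift = 22 (so one counter cycle == 1ns) and
 * HZ = 100 (NTP_INTERVAL_LENGTH = 10000000ns), tk_setup_internals()
 * computes:
 *
 *	tmp = 10000000 << 22;			(ntpinterval, shifted ns)
 *	tmp = (tmp + mult/2) / mult;		= 10000000 cycles per tick
 *	tk->cycle_interval = 10000000;
 *	tk->xtime_interval = 10000000 * (1 << 22);	(shifted ns)
 *	tk->xtime_remainder = 0;		(the conversion is exact here)
 *
 * With a real clocksource the division rounds, and xtime_remainder then
 * carries the leftover shifted-ns so the NTP accounting stays exact.
 */
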
/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
	if (likely(arch_gettimeoffset))
		return arch_gettimeoffset();
	return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = (cycle_delta * tk->mult + tk->xtime_nsec) >> tk->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}

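/*
 * Worked example (hypothetical numbers): a 2.5GHz counter would register
 * roughly mult = 1677722 and shift = 22 (0.4ns per cycle). A delta of
 * 1000 cycles then converts as:
 *
 *	nsec = (1000 * 1677722) >> 22 = 400ns
 *
 * timekeeping_get_ns() does the same multiply/shift, but with tk->mult
 * (the NTP-corrected copy) and with tk->xtime_nsec already folded in as
 * shifted nanoseconds.
 */
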
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}
	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = tk->clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	tk->cycle_last = clock->cycle_last = cycle_now;

	tk->xtime_nsec += cycle_delta * tk->mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday);

/**
 * getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday(struct timespec *ts)
{
	WARN_ON(__getnstimeofday(ts));
}
EXPORT_SYMBOL(getnstimeofday);

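/*
 * Usage sketch (illustrative caller, not part of this file):
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("wall time: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * Readers never take timekeeper_lock; the seqcount retry loop above
 * makes the read lock-free against concurrent writers.
 */
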
ktime_t ktime_get(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

	} while (read_seqcount_retry(&timekeeper_seq, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

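/*
 * Usage sketch (illustrative, e.g. computing an hrtimer-style deadline):
 *
 *	ktime_t now = ktime_get();
 *	ktime_t expires = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
 *
 * Because this is CLOCK_MONOTONIC, the result is unaffected by
 * settimeofday() and NTP time jumps, unlike ktime_get_real().
 */
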
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * timekeeping_clocktai - Returns the TAI time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void timekeeping_clocktai(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ts->tv_sec = tk->xtime_sec + tk->tai_offset;
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(timekeeping_clocktai);

/**
 * ktime_get_clocktai - Returns the TAI time of day in a ktime
 *
 * Returns the time of day in a ktime.
 */
ktime_t ktime_get_clocktai(void)
{
	struct timespec ts;

	timekeeping_clocktai(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL(ktime_get_clocktai);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		*ts_raw = tk->raw_time;
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(tk);

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec ts_delta, xt;
	unsigned long flags;

	if (!timespec_valid_strict(tv))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, tv);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

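/*
 * Usage sketch (illustrative caller):
 *
 *	struct timespec ts = { .tv_sec = 1400000000, .tv_nsec = 0 };
 *
 *	if (do_settimeofday(&ts))
 *		pr_err("invalid time value\n");
 *
 * Note that wall_to_monotonic absorbs the jump, so CLOCK_MONOTONIC
 * readers see no discontinuity across the set.
 */
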
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec_add(tk_xtime(tk), *ts);
	if (!timespec_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &timekeeper;

	if (tk->clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->clock == clock ? 0 : -1;
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		nsecs = timekeeping_get_ns_raw(tk);
		*ts = tk->raw_time;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ret = tk->clock->max_idle_ns;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return ret;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot, tmp;

	read_persistent_clock(&now);

	if (!timespec_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exist = true;

	read_boot_clock(&boot);
	if (!timespec_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	tmp.tv_sec = 0;
	tmp.tv_nsec = 0;
	tk_set_sleep_time(tk, tmp);

	memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
							struct timespec *delta)
{
	if (!timespec_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
	tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
	tk_debug_account_sleep_time(delta);
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;

	/*
	 * Make sure we don't set the clock twice, as timekeeping_resume()
	 * already did it
	 */
	if (has_persistent_clock())
		return;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock = tk->clock;
	unsigned long flags;
	struct timespec ts_new, ts_delta;
	cycle_t cycle_now, cycle_delta;
	bool suspendtime_found = false;

	read_persistent_clock(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = clock->read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > clock->cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/*
		 * "cycle_delta * mult" may cause 64-bit overflow if the
		 * suspended time is too long. In that case we need to do
		 * the 64-bit math carefully.
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

		ts_delta = ns_to_timespec(nsec);
		suspendtime_found = true;
	} else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
		suspendtime_found = true;
	}

	if (suspendtime_found)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->cycle_last = clock->cycle_last = cycle_now;
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

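/*
 * Worked example of the overflow guard above (hypothetical numbers):
 * with mult = 1 << 22, max becomes ULLONG_MAX / mult, roughly 2^42
 * cycles. Only if the suspend interval exceeds that is the delta split
 * into "num" chunks of max cycles, each converted separately, so that
 * "cycle_delta * mult" never overflows 64 bits.
 */
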
static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec		delta, delta_delta;
	static struct timespec	old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock cannot be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exist = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}

	timekeeping_update(tk, TK_MIRROR);
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4(via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if its smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else {
		if (error < -interval) {
			/* See comment above, this is just switched for the negative */
			error >>= 2;
			if (likely(error >= -interval)) {
				adj = -1;
				interval = -interval;
				offset = -offset;
			} else {
				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
			}
		} else {
			goto out_adjust;
		}
	}

	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_deferred_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * Its the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small. Further, if we're slightly speeding the clocksource
	 * in the code above, its possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
	unsigned int clock_set = 0;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}

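/*
 * Example: when NTP inserts a leap second, second_overflow() returns
 * leap = -1 at the rollover, so xtime_sec steps back one second (UTC
 * replays 23:59:59) while tai_offset - leap grows by one, keeping
 * CLOCK_TAI continuous across the leap.
 */
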
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift,
						unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->cycle_last += interval;

	tk->xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
	tk->xtime_nsec -= remainder;
	tk->xtime_nsec += 1ULL << tk->shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
	tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	struct timekeeper *real_tk = &timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

	clock = real_tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset. We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&timekeeper_seq);
	/* Update clock->cycle_last with the new value */
	clock->cycle_last = tk->cycle_last;
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the timekeeper_seq against one before we start
	 * updating.
	 */
	memcpy(real_tk, tk, sizeof(*tk));
	timekeeping_update(real_tk, clock_set);
	write_seqcount_end(&timekeeper_seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since we're in irq context */
		clock_was_set_delayed();
}

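/*
 * Worked example for the shift bound above (assuming HZ = 100 and the
 * default NTP_SCALE_SHIFT = 32): ntp_tick_length() is then roughly
 * 10000000 << 32, about 2^55, so maxshift = (64 - 56) - 1 = 7 and at
 * most 2^7 = 128 tick intervals are accumulated per loop pass, keeping
 * the "ntp_tick_length() << shift" term within 64 bits.
 */
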
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec boottime = {
		.tv_sec = tk->wall_to_monotonic.tv_sec +
				tk->total_sleep_time.tv_sec,
		.tv_nsec = tk->wall_to_monotonic.tv_nsec +
				tk->total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono, sleep;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;
		sleep = tk->total_sleep_time;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;

	*ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		*xtim = tk_xtime(tk);
		*wtom = tk->wall_to_monotonic;
		*sleep = tk->total_sleep_time;
	} while (read_seqcount_retry(&timekeeper_seq, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
							ktime_t *offs_tai)
{
	struct timekeeper *tk = &timekeeper;
	ktime_t now;
	unsigned int seq;
	u64 secs, nsecs;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		secs = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
		*offs_tai = tk->offs_tai;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
	now = ktime_sub(now, *offs_real);
	return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		wtom = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
}