/*
 *  linux/kernel/time.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various
 *  time related system calls: time, stime, gettimeofday, settimeofday,
 *  adjtime
 */

/*
 * Modification history kernel/time.c
 *
 * 1993-09-02	Philip Gladstone
 *	Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08	Torsten Duwe
 *	adjtime interface update and CMOS clock write code
 * 1995-08-13	Torsten Duwe
 *	kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16	Ulrich Windl
 *	Introduced error checking for many cases in adjtimex().
 *	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *	(Even though the technical memorandum forbids it)
 * 2004-07-14	Christoph Lameter
 *	Added getnstimeofday to allow the posix timer functions to return
 *	with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>

#include <generated/timeconst.h>
#include "timekeeping.h"

/*
 * The timezone where the local system is located.  Used as a default by some
 * programs that obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
        time_t i = get_seconds();

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */
COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
        struct timeval tv;
        compat_time_t i;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
#endif /* CONFIG_COMPAT */

SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        if (likely(tv != NULL)) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (copy_to_user(tv, &ktv, sizeof(ktv)))
                        return -EFAULT;
        }
        if (unlikely(tz != NULL)) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }
        return 0;
}

/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */
int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
        static int firsttime = 1;
        int error = 0;

        if (tv && !timespec64_valid(tv))
                return -EINVAL;

        error = security_settime64(tv, tz);
        if (error)
                return error;

        if (tz) {
                /* Verify we're within the +-15 hrs range */
                if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
                        return -EINVAL;

                sys_tz = *tz;
                update_vsyscall_tz();

                if (firsttime) {
                        firsttime = 0;
                        if (!tv)
                                timekeeping_warp_clock();
                }
        }
        if (tv)
                return do_settimeofday64(tv);
        return 0;
}

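/*
 * Illustrative example (not taken from any in-tree caller): a hypothetical
 * early-boot helper that only sets the timezone leaves the wall clock alone,
 * apart from the one-time warp described above when tv is NULL:
 *
 *	struct timezone tz = { .tz_minuteswest = -60, .tz_dsttime = 0 };
 *
 *	do_sys_settimeofday64(NULL, &tz);	// time itself is not changed
 */
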
SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;

                if (!timeval_valid(&user_tv))
                        return -EINVAL;

                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        if (tv) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (compat_put_timeval(&ktv, tv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }

        return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (compat_get_timeval(&user_tv, tv))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif

SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
        struct timex txc;		/* Local copy of parameter */
        int ret;

        /* Copy the user data space into the kernel copy
         * structure. But bear in mind that the structures
         * may change
         */
        if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
                return -EFAULT;
        ret = do_adjtimex(&txc);
        return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
        struct timex txc;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        ret = do_adjtimex(&txc);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}
#endif

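/*
 * Illustrative example (userspace, not part of this file): a read-only
 * adjtimex() query passes modes == 0; the return value is the clock state
 * (TIME_OK, TIME_INS, ...) and the struct is filled in with the current
 * offset, frequency and status:
 *
 *	struct timex tx = { .modes = 0 };
 *	int state = adjtimex(&tx);
 */
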
/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
        return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);

unsigned int jiffies_to_usecs(const unsigned long j)
{
        /*
         * HZ usually doesn't go much beyond MSEC_PER_SEC.
         * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
         */
        BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
        return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
        return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

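/*
 * For illustration, assuming HZ == 250 so each jiffy is 4 ms (both helpers
 * above then hit their exact fast path):
 *	jiffies_to_msecs(25) == (1000 / 250) * 25 == 100
 *	jiffies_to_usecs(25) == (1000000 / 250) * 25 == 100000
 */
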
/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. Always rounds down. gran must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
        /* Avoid division in the common cases 1 ns and 1 s. */
        if (gran == 1) {
                /* nothing */
        } else if (gran == NSEC_PER_SEC) {
                t.tv_nsec = 0;
        } else if (gran > 1 && gran < NSEC_PER_SEC) {
                t.tv_nsec -= t.tv_nsec % gran;
        } else {
                WARN(1, "illegal file time granularity: %u", gran);
        }
        return t;
}
EXPORT_SYMBOL(timespec_trunc);

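/*
 * For illustration: truncating to a 1 ms file time granularity
 * (gran == 1000000) drops only the sub-millisecond part:
 *
 *	struct timespec t = { .tv_sec = 7, .tv_nsec = 123456789 };
 *
 *	t = timespec_trunc(t, 1000000);	// t.tv_nsec is now 123000000
 */
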
/**
 * mktime64 - Converts date to seconds.
 * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * A leap second can be indicated by calling this function with sec as
 * 60 (allowable under ISO 8601). The leap second is treated the same
 * as the following second since they don't exist in UNIX time.
 *
 * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight
 * tomorrow - (allowable under ISO 8601) is supported.
 */
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
                  const unsigned int day, const unsigned int hour,
                  const unsigned int min, const unsigned int sec)
{
        unsigned int mon = mon0, year = year0;

        /* 1..12 -> 11,12,1..10 */
        if (0 >= (int) (mon -= 2)) {
                mon += 12;	/* Puts Feb last since it has leap day */
                year -= 1;
        }

        return ((((time64_t)
                  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                  year*365 - 719499
            )*24 + hour /* now have hours - midnight tomorrow handled here */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);

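/*
 * Worked example of the formula above: for 2000-01-01 00:00:00,
 * mon0 == 1 shifts to mon == 11 with year == 1999, and the day count is
 * 1999/4 - 1999/100 + 1999/400 + 367*11/12 + 1 + 1999*365 - 719499 == 10957,
 * so mktime64(2000, 1, 1, 0, 0, 0) == 10957 * 86400 == 946684800.
 * Likewise mktime64(1970, 1, 1, 0, 0, 0) == 0, the Unix epoch.
 */
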
#if __BITS_PER_LONG == 32
/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;
        s32 rem;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
#endif

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
        struct timespec ts = ns_to_timespec(nsec);
        struct timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_timeval);

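/*
 * For illustration: ns_to_timeval(1500000000) splits 1.5 s into
 * { .tv_sec = 1, .tv_usec = 500000 }, while a negative input such as
 * -500000000 yields { .tv_sec = -1, .tv_usec = 500000 } because only
 * tv_sec carries the sign after normalization.
 */
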
/**
 * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec64 variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec64 variable and
 * normalize to the timespec64 storage format
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);

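/*
 * For illustration: set_normalized_timespec64(&ts, 1, 1500000000) stores
 * { .tv_sec = 2, .tv_nsec = 500000000 }, and a call with sec == 5 and
 * nsec == -100 stores { .tv_sec = 4, .tv_nsec = 999999900 }.
 */
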
/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
struct timespec64 ns_to_timespec64(const s64 nsec)
{
        struct timespec64 ts;
        s32 rem;

        if (!nsec)
                return (struct timespec64) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);

/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:	time in milliseconds
 *
 * Conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   For the details see __msecs_to_jiffies().
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p() allowing gcc to eliminate most of the
 * code. __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * The _msecs_to_jiffies helpers are the HZ dependent conversion
 * routines found in include/linux/jiffies.h.
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;
        return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);

unsigned long __usecs_to_jiffies(const unsigned int u)
{
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
        return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);

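/*
 * For illustration, a typical timeout conversion in a driver-style caller,
 * assuming HZ == 250 so one jiffy is 4 ms (the exact result is HZ dependent):
 *
 *	unsigned long t = msecs_to_jiffies(100);	// 25 jiffies
 *
 *	// (unsigned int)-1 is treated as negative and saturates:
 *	unsigned long forever = msecs_to_jiffies(-1);	// MAX_JIFFY_OFFSET
 */
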
/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
static unsigned long
__timespec64_to_jiffies(u64 sec, long nsec)
{
        nsec = nsec + TICK_NSEC - 1;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                nsec = 0;
        }
        return ((sec * SEC_CONVERSION) +
                (((u64)nsec * NSEC_CONVERSION) >>
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}

static unsigned long
__timespec_to_jiffies(unsigned long sec, long nsec)
{
        return __timespec64_to_jiffies((u64)sec, nsec);
}

unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
        return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
}
EXPORT_SYMBOL(timespec64_to_jiffies);

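/*
 * For illustration of the round-up, assuming HZ == 100 so TICK_NSEC is
 * 10000000: a timespec64 of { 0, 10000001 } converts to 2 jiffies, because
 * any fraction of a tick is rounded up to the next one, while
 * { 0, 10000000 } converts to exactly 1 jiffy.
 */
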
void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);

/*
 * We could use a similar algorithm to timespec_to_jiffies (with a
 * different multiplier for usec instead of nsec). But this has a
 * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
 * usec value, since it's not necessarily integral.
 *
 * We could instead round in the intermediate scaled representation
 * (i.e. in units of 1/2^(large scale) jiffies) but that's also
 * perilous: the scaling introduces a small positive error, which
 * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
 * units to the intermediate before shifting) leads to accidental
 * overflow and overestimates.
 *
 * At the cost of one additional multiplication by a constant, just
 * use the timespec implementation.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
        return __timespec_to_jiffies(value->tv_sec,
                                     value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        return x * (USER_HZ / HZ);
# else
        return x / (HZ / USER_HZ);
# endif
#else
        return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);

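/*
 * For illustration, assuming HZ == 1000 and USER_HZ == 100: TICK_NSEC is
 * 1000000, which is not a multiple of NSEC_PER_SEC / USER_HZ (10000000),
 * so the generic branch is used and jiffies_to_clock_t(250) returns
 * 250 * 1000000 / 10000000 == 25 clock ticks.
 */
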
unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
        if (x >= ~0UL / (HZ / USER_HZ))
                return ~0UL;
        return x * (HZ / USER_HZ);
#else
        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;

        /* .. but do try to contain it here */
        return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
        x = div_u64(x, HZ / USER_HZ);
# else
        /* Nothing to do */
# endif
#else
        /*
         * There are better ways that don't overflow early,
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
        x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
        return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
        return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
        return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
         * overflow after 64.99 years.
         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
         */
        return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}

u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
        return (NSEC_PER_SEC / HZ) * j;
#else
        return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
        return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
        /* overflow after 292 years if HZ = 1024 */
        return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * Generic case - optimized for cases where HZ is a multiple of 3.
         * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
         */
        return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);

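/*
 * For illustration, assuming HZ == 250 (the common-case branch above):
 * nsecs_to_jiffies64(8000000) == 8000000 / (1000000000 / 250) == 2,
 * i.e. 8 ms is two 4 ms ticks.
 */
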
/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
        return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);

/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                                      const struct timespec64 rhs)
{
        struct timespec64 res;

        set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
                                  lhs.tv_nsec + rhs.tv_nsec);

        if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
                res.tv_sec = TIME64_MAX;
                res.tv_nsec = 0;
        }

        return res;
}

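/*
 * For illustration: adding { 1, 800000000 } and { 2, 700000000 } yields the
 * normalized { 4, 500000000 }, while a sum whose seconds wrap past
 * TIME64_MAX is clamped to { TIME64_MAX, 0 } by the check above.
 */
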
int get_timespec64(struct timespec64 *ts,
                   const struct timespec __user *uts)
{
        struct timespec kts;
        int ret;

        ret = copy_from_user(&kts, uts, sizeof(kts));
        if (ret)
                return -EFAULT;

        ts->tv_sec = kts.tv_sec;
        ts->tv_nsec = kts.tv_nsec;

        return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);

int put_timespec64(const struct timespec64 *ts,
                   struct timespec __user *uts)
{
        struct timespec kts = {
                .tv_sec = ts->tv_sec,
                .tv_nsec = ts->tv_nsec
        };

        return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);

int get_itimerspec64(struct itimerspec64 *it,
                     const struct itimerspec __user *uit)
{
        int ret;

        ret = get_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = get_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);

int put_itimerspec64(const struct itimerspec64 *it,
                     struct itimerspec __user *uit)
{
        int ret;

        ret = put_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = put_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);