/*
 *  linux/kernel/time.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various
 *  time related system calls: time, stime, gettimeofday, settimeofday,
 *  adjtime
 */
/*
 * Modification history kernel/time.c
 *
 * 1993-09-02	Philip Gladstone
 *	Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08	Torsten Duwe
 *	adjtime interface update and CMOS clock write code
 * 1995-08-13	Torsten Duwe
 *	kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16	Ulrich Windl
 *	Introduced error checking for many cases in adjtimex().
 *	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *	(Even though the technical memorandum forbids it)
 * 2004-07-14	Christoph Lameter
 *	Added getnstimeofday to allow the posix timer functions to return
 *	with nanosecond accuracy
 */
#include <linux/export.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>

#include <generated/timeconst.h>
#include "timekeeping.h"
/*
 * The timezone where the local system is located.  Used as a default by some
 * programs who obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);
#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
        time_t i = get_seconds();

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */
#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */
COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
        struct timeval tv;
        compat_time_t i;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
#endif
SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        if (likely(tv != NULL)) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (copy_to_user(tv, &ktv, sizeof(ktv)))
                        return -EFAULT;
        }
        if (unlikely(tz != NULL)) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }
        return 0;
}
/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */

int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
        static int firsttime = 1;
        int error = 0;

        if (tv && !timespec64_valid(tv))
                return -EINVAL;

        error = security_settime64(tv, tz);
        if (error)
                return error;

        if (tz) {
                /* Verify we're within the +-15 hrs range */
                if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
                        return -EINVAL;

                sys_tz = *tz;
                update_vsyscall_tz();
                if (firsttime) {
                        firsttime = 0;
                        if (!tv)
                                timekeeping_warp_clock();
                }
        }
        if (tv)
                return do_settimeofday64(tv);
        return 0;
}
SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;

                if (!timeval_valid(&user_tv))
                        return -EINVAL;

                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        if (tv) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (compat_put_timeval(&ktv, tv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }

        return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (compat_get_timeval(&user_tv, tv))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif
SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
        struct timex txc;	/* Local copy of parameter */
        int ret;

        /* Copy the user data space into the kernel copy
         * structure. But bear in mind that the structures
         * may change
         */
        if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
                return -EFAULT;
        ret = do_adjtimex(&txc);
        return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
        struct timex txc;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        ret = do_adjtimex(&txc);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}
#endif
/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
        return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
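
/*
 * Editorial example (not part of the original file): every branch above
 * computes the same conversion, only the arithmetic differs per HZ.  With
 * HZ == 1000 a jiffy is 1 ms, with HZ == 250 it is 4 ms.  The hypothetical
 * helper below is compiled out and only illustrates the expected values.
 */
#if 0	/* illustration only */
static void jiffies_to_msecs_example(void)
{
# if HZ == 1000
        WARN_ON(jiffies_to_msecs(250) != 250);	/* 1 ms per jiffy */
# elif HZ == 250
        WARN_ON(jiffies_to_msecs(250) != 1000);	/* 4 ms per jiffy */
# endif
}
#endif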
unsigned int jiffies_to_usecs(const unsigned long j)
{
        /*
         * HZ usually doesn't go much beyond MSEC_PER_SEC.
         * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
         */
        BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
        return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
        return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);
/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. Always rounds down. gran must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
        /* Avoid division in the common cases 1 ns and 1 s. */
        if (gran == 1) {
                /* nothing */
        } else if (gran == NSEC_PER_SEC) {
                t.tv_nsec = 0;
        } else if (gran > 1 && gran < NSEC_PER_SEC) {
                t.tv_nsec -= t.tv_nsec % gran;
        } else {
                WARN(1, "illegal file time granularity: %u", gran);
        }
        return t;
}
EXPORT_SYMBOL(timespec_trunc);
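
/*
 * Editorial example (not part of the original file): truncating a timestamp
 * to a 1 ms granularity only rounds the nanosecond part down; the seconds
 * are untouched.  Compiled out, illustration only.
 */
#if 0	/* illustration only */
static void timespec_trunc_example(void)
{
        struct timespec t = { .tv_sec = 5, .tv_nsec = 1234567 };

        t = timespec_trunc(t, NSEC_PER_MSEC);
        WARN_ON(t.tv_sec != 5 || t.tv_nsec != 1000000);
}
#endif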
/*
 * mktime64 - Converts date to seconds.
 * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * A leap second can be indicated by calling this function with sec as
 * 60 (allowable under ISO 8601). The leap second is treated the same
 * as the following second since they don't exist in UNIX time.
 *
 * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight
 * tomorrow - (allowable under ISO 8601) is supported.
 */
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
                  const unsigned int day, const unsigned int hour,
                  const unsigned int min, const unsigned int sec)
{
        unsigned int mon = mon0, year = year0;

        /* 1..12 -> 11,12,1..10 */
        if (0 >= (int) (mon -= 2)) {
                mon += 12;	/* Puts Feb last since it has leap day */
                year -= 1;
        }

        return ((((time64_t)
                  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                  year*365 - 719499
            )*24 + hour /* now have hours - midnight tomorrow handled here */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);
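
/*
 * Editorial example (not part of the original file): the Gauss-style formula
 * above yields the usual Unix epoch arithmetic, e.g. 2000-01-01 00:00:00 UTC
 * is 946684800 seconds after the epoch, and sec == 60 (a leap second) simply
 * maps onto the following second.  Compiled out, illustration only.
 */
#if 0	/* illustration only */
static void mktime64_example(void)
{
        WARN_ON(mktime64(2000, 1, 1, 0, 0, 0) != 946684800);
        WARN_ON(mktime64(1998, 12, 31, 23, 59, 60) !=
                mktime64(1999, 1, 1, 0, 0, 0));
}
#endif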
#if __BITS_PER_LONG == 32
/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;
        s32 rem;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
#endif
/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
        struct timespec ts = ns_to_timespec(nsec);
        struct timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_timeval);

struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
{
        struct timespec64 ts = ns_to_timespec64(nsec);
        struct __kernel_old_timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_kernel_old_timeval);
/**
 * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec64 variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec64 variable and
 * normalize to the timespec64 storage format
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative!
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);
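
/*
 * Editorial example (not part of the original file): normalization moves
 * whole seconds out of the nanosecond field in both directions, so tv_nsec
 * always ends up in [0, NSEC_PER_SEC).  Compiled out, illustration only.
 */
#if 0	/* illustration only */
static void set_normalized_timespec64_example(void)
{
        struct timespec64 ts;

        set_normalized_timespec64(&ts, 1, 2 * NSEC_PER_SEC + 5);
        WARN_ON(ts.tv_sec != 3 || ts.tv_nsec != 5);

        set_normalized_timespec64(&ts, 1, -1);
        WARN_ON(ts.tv_sec != 0 || ts.tv_nsec != NSEC_PER_SEC - 1);
}
#endif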
/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
struct timespec64 ns_to_timespec64(const s64 nsec)
{
        struct timespec64 ts;
        s32 rem;

        if (!nsec)
                return (struct timespec64) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
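
/*
 * Editorial example (not part of the original file): for negative inputs
 * only tv_sec goes negative, tv_nsec stays in [0, NSEC_PER_SEC).  Compiled
 * out, illustration only.
 */
#if 0	/* illustration only */
static void ns_to_timespec64_example(void)
{
        struct timespec64 ts = ns_to_timespec64(-500000000);

        WARN_ON(ts.tv_sec != -1 || ts.tv_nsec != 500000000);
}
#endif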
/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:	time in milliseconds
 *
 * conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   for the details see __msecs_to_jiffies()
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p() allowing gcc to eliminate most of the
 * code, __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * the _msecs_to_jiffies helpers are the HZ dependent conversion
 * routines found in include/linux/jiffies.h
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;
        return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);

unsigned long __usecs_to_jiffies(const unsigned int u)
{
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
        return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);
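
/*
 * Editorial example (not part of the original file): a typical caller builds
 * a relative timeout with the msecs_to_jiffies() inline wrapper from
 * <linux/jiffies.h>, which falls back to __msecs_to_jiffies() above when the
 * argument is not a compile-time constant.  The hypothetical helper below is
 * compiled out, illustration only.
 */
#if 0	/* illustration only */
static unsigned long example_poll_deadline(void)
{
        /* ~100 ms from now, rounded to whole jiffies */
        return jiffies + msecs_to_jiffies(100);
}
#endif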
/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
static unsigned long
__timespec64_to_jiffies(u64 sec, long nsec)
{
        nsec = nsec + TICK_NSEC - 1;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                nsec = 0;
        }
        return ((sec * SEC_CONVERSION) +
                (((u64)nsec * NSEC_CONVERSION) >>
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}

static unsigned long
__timespec_to_jiffies(unsigned long sec, long nsec)
{
        return __timespec64_to_jiffies((u64)sec, nsec);
}

unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
        return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
}
EXPORT_SYMBOL(timespec64_to_jiffies);
void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);
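
/*
 * Editorial example (not part of the original file): jiffies * TICK_NSEC is
 * split into seconds and nanoseconds with a single division.  The numbers
 * below assume HZ == 1000 (so TICK_NSEC == 1000000); compiled out,
 * illustration only.
 */
#if 0	/* illustration only */
static void jiffies_to_timespec64_example(void)
{
        struct timespec64 ts;

        jiffies_to_timespec64(1500, &ts);	/* assuming HZ == 1000 */
        WARN_ON(ts.tv_sec != 1 || ts.tv_nsec != 500 * NSEC_PER_MSEC);
}
#endif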
/*
 * We could use a similar algorithm to timespec_to_jiffies (with a
 * different multiplier for usec instead of nsec). But this has a
 * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
 * usec value, since it's not necessarily integral.
 *
 * We could instead round in the intermediate scaled representation
 * (i.e. in units of 1/2^(large scale) jiffies) but that's also
 * perilous: the scaling introduces a small positive error, which
 * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
 * units to the intermediate before shifting) leads to accidental
 * overflow and overestimates.
 *
 * At the cost of one additional multiplication by a constant, just
 * use the timespec implementation.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
        return __timespec_to_jiffies(value->tv_sec,
                                     value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);
void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);
/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        return x * (USER_HZ / HZ);
# else
        return x / (HZ / USER_HZ);
# endif
#else
        return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
        if (x >= ~0UL / (HZ / USER_HZ))
                return ~0UL;
        return x * (HZ / USER_HZ);
#else
        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;

        /* .. but do try to contain it here */
        return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);
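
/*
 * Editorial example (not part of the original file): clock_t ticks count in
 * USER_HZ (100 on most architectures), independently of the kernel HZ.  The
 * numbers below assume HZ == 1000 and USER_HZ == 100, so ten jiffies make
 * one clock tick.  Compiled out, illustration only.
 */
#if 0	/* illustration only */
static void jiffies_clock_t_example(void)
{
        /* assuming HZ == 1000 and USER_HZ == 100 */
        WARN_ON(jiffies_to_clock_t(1000) != 100);
        WARN_ON(clock_t_to_jiffies(100) != 1000);
}
#endif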
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
        x = div_u64(x, HZ / USER_HZ);
# else
        /* Nothing to do */
# endif
#else
        /*
         * There are better ways that don't overflow early,
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
        x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
        return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);
u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
        return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
        return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
         * overflow after 64.99 years.
         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
         */
        return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
        return (NSEC_PER_SEC / HZ) * j;
#else
        return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);
/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
        return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
        /* overflow after 292 years if HZ = 1024 */
        return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * Generic case - optimized for cases where HZ is a multiple of 3.
         * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
         */
        return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);
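
/*
 * Editorial example (not part of the original file): in the common case
 * where HZ divides NSEC_PER_SEC this is a plain division; e.g. with
 * HZ == 250 one jiffy is 4,000,000 ns.  Compiled out, illustration only.
 */
#if 0	/* illustration only */
static void nsecs_to_jiffies64_example(void)
{
        /* assuming HZ == 250 (NSEC_PER_SEC % HZ == 0) */
        WARN_ON(nsecs_to_jiffies64(8000000) != 2);
}
#endif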
/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
        return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                                      const struct timespec64 rhs)
{
        struct timespec64 res;

        set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
                                  lhs.tv_nsec + rhs.tv_nsec);

        if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
                res.tv_sec = TIME64_MAX;
                res.tv_nsec = 0;
        }

        return res;
}
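
/*
 * Editorial example (not part of the original file): on overflow the sum
 * saturates at TIME64_MAX instead of wrapping around.  Compiled out,
 * illustration only.
 */
#if 0	/* illustration only */
static void timespec64_add_safe_example(void)
{
        struct timespec64 a = { .tv_sec = TIME64_MAX - 1, .tv_nsec = 0 };
        struct timespec64 b = { .tv_sec = 10, .tv_nsec = 0 };
        struct timespec64 sum = timespec64_add_safe(a, b);

        WARN_ON(sum.tv_sec != TIME64_MAX || sum.tv_nsec != 0);
}
#endif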
int get_timespec64(struct timespec64 *ts,
                   const struct timespec __user *uts)
{
        struct timespec kts;
        int ret;

        ret = copy_from_user(&kts, uts, sizeof(kts));
        if (ret)
                return -EFAULT;

        ts->tv_sec = kts.tv_sec;
        ts->tv_nsec = kts.tv_nsec;

        return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);

int put_timespec64(const struct timespec64 *ts,
                   struct timespec __user *uts)
{
        struct timespec kts = {
                .tv_sec = ts->tv_sec,
                .tv_nsec = ts->tv_nsec
        };

        return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);
int get_itimerspec64(struct itimerspec64 *it,
                     const struct itimerspec __user *uit)
{
        int ret;

        ret = get_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = get_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);

int put_itimerspec64(const struct itimerspec64 *it,
                     struct itimerspec __user *uit)
{
        int ret;

        ret = put_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = put_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);