/*	$NetBSD: kern_time.c,v 1.162 2009/10/03 20:48:42 elad Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.162 2009/10/03 20:48:42 elad Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;
/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}
/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
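/*
 * Editorial note (illustrative, not part of the original source): the
 * timespecadd()/timespecsub() macros used throughout operate on
 * normalized values and carry between the nanosecond and second
 * fields.  A minimal sketch:
 *
 *	struct timespec a = { .tv_sec = 2, .tv_nsec = 900000000 };
 *	struct timespec b = { .tv_sec = 1, .tv_nsec = 200000000 };
 *	struct timespec r;
 *
 *	timespecadd(&a, &b, &r);	// r = { 4, 100000000 }
 *	timespecsub(&a, &b, &r);	// r = { 1, 700000000 }
 */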
/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) {	/* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}
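/*
 * Editorial worked example for settime1(): if the clock currently
 * reads 1000.0s and is set to 1010.0s, then delta = ts - now =
 * { 10, 0 }, and timespecadd(&boottime, &delta, &boottime) moves
 * boottime forward by the same 10s, so the system uptime (wall clock
 * minus boottime) is unchanged by the step.
 */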
int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}
/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	clockid_t clock_id;
	struct timespec ats;

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(&ats);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(&ats);
		break;
	default:
		return (EINVAL);
	}

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}
/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}
int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}
int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	clockid_t clock_id;
	struct timespec ts;
	int error = 0;

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts.tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts.tv_nsec = 1;
		else
			ts.tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return (EINVAL);
	}

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}
/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}
int
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = itimespecfix(rqt)) != 0)
		return error;

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	getnanouptime(&rmtstart);
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		getnanouptime(&rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}
/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}
/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}
int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}
int	time_adjusted;			/* set if an adjustment is made */
/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error = 0;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}
void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}
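/*
 * Editorial sketch of the adjtime1() contract: the requested skew is
 * accumulated into time_adjtime as a signed microsecond count, which
 * the NTP code then slews out gradually rather than stepping:
 *
 *	struct timeval delta = { .tv_sec = 1, .tv_usec = 500000 };
 *
 *	adjtime1(&delta, NULL, p);	// time_adjtime becomes 1500000us
 */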
/*
 * Interval timer support. Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all. The
 * first three elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
 * syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below), to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timers .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */
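/*
 * Editorial example of the delta-list encoding described above: three
 * virtual-time timers due in 5s, 7s and 10s are kept on pts_virtual
 * with each it_value holding only the increment over its predecessor,
 *
 *	head -> { 5s } -> { 2s } -> { 3s }
 *
 * so the per-tick work in timer_tick() only ever decrements the head
 * entry.
 */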
/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if (id < CLOCK_REALTIME || id > CLOCK_PROF)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
			(pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			 (pt->pt_ev.sigev_signo <= 0 ||
			  pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		/* Create a signal handler context for the timer signal */
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;

	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (id == CLOCK_REALTIME)
		callout_init(&pt->pt_ch, 0);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}
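/*
 * Editorial userland sketch (not part of this file) of the interface
 * implemented above; with no sigevent supplied, timer_create1() falls
 * back to SIGEV_SIGNAL with SIGALRM for CLOCK_REALTIME:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t tid;
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },
 *		.it_interval = { .tv_sec = 1, .tv_nsec = 0 },
 *	};
 *
 *	if (timer_create(CLOCK_REALTIME, NULL, &tid) == 0)
 *		(void)timer_settime(tid, 0, &its, NULL);
 */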
/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (pt->pt_type != CLOCK_REALTIME) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}

	itimerfree(pts, timerid);

	return (0);
}
/*
 * Set up the given timer. The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME timers and a relative
 * time for virtual timers.
 * Must be called with timer_lock held.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (pt->pt_type == CLOCK_REALTIME) {
		callout_stop(&pt->pt_ch);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}
void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (pt->pt_type == CLOCK_REALTIME) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			getnanotime(&now);
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}
/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}
int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (pt->pt_type == CLOCK_REALTIME) {
			if ((flags & TIMER_ABSTIME) == 0) {
				getnanotime(&now);
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}
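/*
 * Editorial worked example for dotimer_settime(): with the clock at
 * now = { 100, 0 }, a relative CLOCK_REALTIME request of it_value =
 * { 5, 0 } (no TIMER_ABSTIME) becomes the absolute expiry { 105, 0 }.
 * Conversely, an absolute value already in the past handed to a
 * virtual timer with TIMER_ABSTIME is clamped to the relative
 * { 0, 1 }, so the timer fires immediately instead of being silently
 * cancelled.
 */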
/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}
/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending. The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	int timerid;
	struct ptimers *pts;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	mutex_spin_exit(&timer_lock);

	return (0);
}
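/*
 * Editorial example of the overrun semantics above: if a 10ms
 * periodic timer expires three more times while its notification is
 * still pending, those expirations are compressed into the pending
 * one and a subsequent timer_getoverrun(2) on that timer returns 3.
 */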
/* Glue function that triggers an upcall; called from userret(). */
void
timerupcall(struct lwp *l)
{
	struct ptimers *pt = l->l_proc->p_timers;
	struct proc *p = l->l_proc;
	unsigned int i, fired, done;

	KDASSERT(l->l_proc->p_sa);
	/* Bail out if we do not own the virtual processor */
	if (l->l_savp->savp_lwp != l)
		return;

	mutex_enter(p->p_lock);

	fired = pt->pts_fired;
	done = 0;
	while ((i = ffs(fired)) != 0) {
		siginfo_t *si;
		int mask = 1 << --i;
		int f;

		f = ~l->l_pflag & LP_SA_NOBLOCK;
		l->l_pflag |= LP_SA_NOBLOCK;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			/* XXX What do we do here?? */
		} else
			done |= mask;
		fired &= ~mask;
		l->l_pflag ^= f;
	}
	pt->pts_fired &= ~done;
	if (pt->pts_fired == 0)
		l->l_proc->p_timerpend = 0;

	mutex_exit(p->p_lock);
}
static uint64_t
timespec2ns(const struct timespec *ts)
{
	return ts->tv_sec * 1000000000ULL + ts->tv_nsec;
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ms;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	getnanotime(&now);
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ms = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ms +
		    (now_ms - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ms - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}
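/*
 * Editorial worked example of the overflow path above, all values in
 * ns: with last_val = 1000, interval = 300 and now_ms = 2000, the
 * callout is 1000ns late, so
 *
 *	next_val = 2000 + (2000 - 1000 + 299) % 300 = 2000 + 99 = 2099
 *	pt_overruns += (2000 - 1000) / 300 = 3
 *
 * i.e. the timer is re-armed strictly in the future and the missed
 * expirations are recorded rather than replayed.
 */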
/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}
/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    ((error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0))
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);

	return dosetitimer(p, which, &aitv);
}
int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (pt->pt_type == CLOCK_REALTIME)
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		getnanotime(&now);
		timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}
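/*
 * Editorial userland sketch of the BSD interface serviced by
 * dosetitimer() above, arming a 100ms profiling interval timer:
 *
 *	#include <sys/time.h>
 *	#include <err.h>
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 0, .tv_usec = 100000 },
 *		.it_interval = { .tv_sec = 0, .tv_usec = 100000 },
 *	};
 *
 *	if (setitimer(ITIMER_PROF, &itv, NULL) == -1)
 *		err(1, "setitimer");
 */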
/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	pts->pts_fired = 0;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}
/*
 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures. If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = 3;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}
*pts
, int index
)
1261 KASSERT(mutex_owned(&timer_lock
));
1263 pt
= pts
->pts_timers
[index
];
1264 pts
->pts_timers
[index
] = NULL
;
1265 if (pt
->pt_type
== CLOCK_REALTIME
)
1266 callout_halt(&pt
->pt_ch
, &timer_lock
);
1267 else if (pt
->pt_queued
)
1268 TAILQ_REMOVE(&timer_queue
, pt
, pt_chain
);
1269 mutex_spin_exit(&timer_lock
);
1270 if (pt
->pt_type
== CLOCK_REALTIME
)
1271 callout_destroy(&pt
->pt_ch
);
1272 pool_put(&ptimer_pool
, pt
);
/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}
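/*
 * Editorial worked example of the borrow logic above: decrementing
 * it_value = { 1, 400000 } by nsec = 10000000 (one tick at 100Hz)
 * takes the tv_nsec < nsec branch, borrows a second to give
 * { 0, 1000400000 - 10000000 } = { 0, 990400000 }.  When the value
 * finally hits zero, it is reloaded from it_interval minus the
 * overshoot, so the period does not drift.
 */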
static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 *
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
	    (pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
	    pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
		return;
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}
void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}
#ifdef KERN_SA
/*
 * SIGEV_SA handling for timer_intr(). We are called (and return)
 * with the timer lock held. We know that the process had SA enabled
 * when this timer was enqueued. As timer_intr() is a soft interrupt
 * handler, SA should still be enabled by the time we get here.
 */
static void
timer_sa_intr(struct ptimer *pt, proc_t *p)
{
	unsigned int i;
	struct sadata *sa;
	struct sadata_vp *vp;

	/* Cause the process to generate an upcall when it returns. */
	if (!p->p_timerpend) {
		/*
		 * XXX stop signals can be processed inside tsleep,
		 * which can be inside sa_yield's inner loop, which
		 * makes testing for sa_idle alone insufficient to
		 * determine if we really should call setrunnable.
		 */
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		i = 1 << pt->pt_entry;
		p->p_timers->pts_fired = i;
		p->p_timerpend = 1;

		sa = p->p_sa;
		mutex_enter(&sa->sa_mutex);
		SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
			struct lwp *vp_lwp = vp->savp_lwp;

			lwp_need_userret(vp_lwp);
			if (vp_lwp->l_flag & LW_SA_IDLE) {
				vp_lwp->l_flag &= ~LW_SA_IDLE;
				lwp_unsleep(vp_lwp, true);
				break;
			}
		}
		mutex_exit(&sa->sa_mutex);
	} else {
		i = 1 << pt->pt_entry;
		if ((p->p_timers->pts_fired & i) == 0) {
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			p->p_timers->pts_fired |= i;
		}
	}
}
#endif /* KERN_SA */
static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
#ifdef KERN_SA
		if (pt->pt_ev.sigev_notify == SIGEV_SA) {
			timer_sa_intr(pt, p);
			continue;
		}
#endif /* KERN_SA */
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
			continue;
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}
/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}