[netbsd-mini2440.git] / sys / kern / kern_time.c
1 /* $NetBSD: kern_time.c,v 1.162 2009/10/03 20:48:42 elad Exp $ */
3 /*-
4 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Christopher G. Demetriou, and by Andrew Doran.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 * Copyright (c) 1982, 1986, 1989, 1993
34 * The Regents of the University of California. All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
60 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.162 2009/10/03 20:48:42 elad Exp $");
66 #include <sys/param.h>
67 #include <sys/resourcevar.h>
68 #include <sys/kernel.h>
69 #include <sys/systm.h>
70 #include <sys/proc.h>
71 #include <sys/vnode.h>
72 #include <sys/signalvar.h>
73 #include <sys/syslog.h>
74 #include <sys/timetc.h>
75 #include <sys/timex.h>
76 #include <sys/kauth.h>
77 #include <sys/mount.h>
78 #include <sys/sa.h>
79 #include <sys/savar.h>
80 #include <sys/syscallargs.h>
81 #include <sys/cpu.h>
83 #include <uvm/uvm_extern.h>
85 #include "opt_sa.h"
87 static void timer_intr(void *);
88 static void itimerfire(struct ptimer *);
89 static void itimerfree(struct ptimers *, int);
91 kmutex_t timer_lock;
93 static void *timer_sih;
94 static TAILQ_HEAD(, ptimer) timer_queue;
96 struct pool ptimer_pool, ptimers_pool;
99 * Initialize timekeeping.
101 void
102 time_init(void)
105 pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
106 &pool_allocator_nointr, IPL_NONE);
107 pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
108 &pool_allocator_nointr, IPL_NONE);
111 void
112 time_init2(void)
115 TAILQ_INIT(&timer_queue);
116 mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
117 timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
118 timer_intr, NULL);
121 /* Time of day and interval timer support.
123 * These routines provide the kernel entry points to get and set
124 * the time-of-day and per-process interval timers. Subroutines
125 * here provide support for adding and subtracting timeval structures
126 * and decrementing interval timers, optionally reloading the interval
127 * timers when they expire.
130 /* This function is used by clock_settime and settimeofday */
131 static int
132 settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
134 struct timespec delta, now;
135 int s;
137 /* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
138 s = splclock();
139 nanotime(&now);
140 timespecsub(ts, &now, &delta);
142 if (check_kauth && kauth_authorize_system(kauth_cred_get(),
143 KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
144 &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
145 splx(s);
146 return (EPERM);
149 #ifdef notyet
150 if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
151 splx(s);
152 return (EPERM);
154 #endif
156 tc_setclock(ts);
158 timespecadd(&boottime, &delta, &boottime);
160 resettodr();
161 splx(s);
163 return (0);
167 settime(struct proc *p, struct timespec *ts)
169 return (settime1(p, ts, true));
172 /* ARGSUSED */
174 sys___clock_gettime50(struct lwp *l,
175 const struct sys___clock_gettime50_args *uap, register_t *retval)
177 /* {
178 syscallarg(clockid_t) clock_id;
179 syscallarg(struct timespec *) tp;
180 } */
181 clockid_t clock_id;
182 struct timespec ats;
184 clock_id = SCARG(uap, clock_id);
185 switch (clock_id) {
186 case CLOCK_REALTIME:
187 nanotime(&ats);
188 break;
189 case CLOCK_MONOTONIC:
190 nanouptime(&ats);
191 break;
192 default:
193 return (EINVAL);
196 return copyout(&ats, SCARG(uap, tp), sizeof(ats));
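/*
 * Example (userland, illustrative): CLOCK_REALTIME above is read with
 * nanotime() and follows clock_settime()/settimeofday() jumps, while
 * CLOCK_MONOTONIC is read with nanouptime() and only moves forward.
 * A minimal caller, assuming nothing beyond the standard C library:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct timespec rt, mt;
 *
 *		if (clock_gettime(CLOCK_REALTIME, &rt) == -1 ||
 *		    clock_gettime(CLOCK_MONOTONIC, &mt) == -1)
 *			return 1;
 *		printf("realtime  %lld.%09ld\n", (long long)rt.tv_sec,
 *		    (long)rt.tv_nsec);
 *		printf("monotonic %lld.%09ld\n", (long long)mt.tv_sec,
 *		    (long)mt.tv_nsec);
 *		return 0;
 *	}
 */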
199 /* ARGSUSED */
201 sys___clock_settime50(struct lwp *l,
202 const struct sys___clock_settime50_args *uap, register_t *retval)
204 /* {
205 syscallarg(clockid_t) clock_id;
206 syscallarg(const struct timespec *) tp;
207 } */
208 int error;
209 struct timespec ats;
211 if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
212 return error;
214 return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
219 clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
220 bool check_kauth)
222 int error;
224 switch (clock_id) {
225 case CLOCK_REALTIME:
226 if ((error = settime1(p, tp, check_kauth)) != 0)
227 return (error);
228 break;
229 case CLOCK_MONOTONIC:
230 return (EINVAL); /* read-only clock */
231 default:
232 return (EINVAL);
235 return 0;
239 sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
240 register_t *retval)
242 /* {
243 syscallarg(clockid_t) clock_id;
244 syscallarg(struct timespec *) tp;
245 } */
246 clockid_t clock_id;
247 struct timespec ts;
248 int error = 0;
250 clock_id = SCARG(uap, clock_id);
251 switch (clock_id) {
252 case CLOCK_REALTIME:
253 case CLOCK_MONOTONIC:
254 ts.tv_sec = 0;
255 if (tc_getfrequency() > 1000000000)
256 ts.tv_nsec = 1;
257 else
258 ts.tv_nsec = 1000000000 / tc_getfrequency();
259 break;
260 default:
261 return (EINVAL);
264 if (SCARG(uap, tp))
265 error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
267 return error;
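/*
 * Example (userland, illustrative): the resolution computed above is
 * 1s / tc_getfrequency(), clamped to 1 ns for timecounters faster
 * than 1 GHz.  A sketch of querying it:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	struct timespec res;
 *
 *	if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
 *		printf("resolution: %ld ns\n", (long)res.tv_nsec);
 */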
270 /* ARGSUSED */
272 sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
273 register_t *retval)
275 /* {
276 syscallarg(struct timespec *) rqtp;
277 syscallarg(struct timespec *) rmtp;
278 } */
279 struct timespec rmt, rqt;
280 int error, error1;
282 error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
283 if (error)
284 return (error);
286 error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
287 if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
288 return error;
290 error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
291 return error1 ? error1 : error;
295 nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
297 struct timespec rmtstart;
298 int error, timo;
300 if ((error = itimespecfix(rqt)) != 0)
301 return error;
303 timo = tstohz(rqt);
305 * Avoid inadvertently sleeping forever
307 if (timo == 0)
308 timo = 1;
309 getnanouptime(&rmtstart);
310 again:
311 error = kpause("nanoslp", true, timo, NULL);
312 if (rmt != NULL || error == 0) {
313 struct timespec rmtend;
314 struct timespec t0;
315 struct timespec *t;
317 getnanouptime(&rmtend);
318 t = (rmt != NULL) ? rmt : &t0;
319 timespecsub(&rmtend, &rmtstart, t);
320 timespecsub(rqt, t, t);
321 if (t->tv_sec < 0)
322 timespecclear(t);
323 if (error == 0) {
324 timo = tstohz(t);
325 if (timo > 0)
326 goto again;
330 if (error == ERESTART)
331 error = EINTR;
332 if (error == EWOULDBLOCK)
333 error = 0;
335 return error;
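/*
 * Example (userland, illustrative): nanosleep1() above already absorbs
 * early wakeups from kpause(), but a caught signal still returns EINTR
 * with the unslept time written to *rmt.  A caller that wants the full
 * delay regardless of signals can resume with the remainder (sketch):
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	static void
 *	sleep_full(struct timespec req)
 *	{
 *		struct timespec rem;
 *
 *		while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *			req = rem;	// retry with what is left
 *	}
 */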
338 /* ARGSUSED */
340 sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
341 register_t *retval)
343 /* {
344 syscallarg(struct timeval *) tp;
345 syscallarg(void *) tzp; really "struct timezone *";
346 } */
347 struct timeval atv;
348 int error = 0;
349 struct timezone tzfake;
351 if (SCARG(uap, tp)) {
352 microtime(&atv);
353 error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
354 if (error)
355 return (error);
357 if (SCARG(uap, tzp)) {
359 * NetBSD has no kernel notion of time zone, so we just
360 * fake up a timezone struct and return it if demanded.
362 tzfake.tz_minuteswest = 0;
363 tzfake.tz_dsttime = 0;
364 error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
366 return (error);
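/*
 * Example (userland, illustrative): since the kernel keeps no time
 * zone, the second argument is normally NULL; a non-NULL pointer just
 * receives the zero-filled fake above.  Sketch:
 *
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	struct timeval tv;
 *
 *	if (gettimeofday(&tv, NULL) == 0)
 *		printf("%lld.%06ld\n", (long long)tv.tv_sec,
 *		    (long)tv.tv_usec);
 */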
369 /* ARGSUSED */
371 sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
372 register_t *retval)
374 /* {
375 syscallarg(const struct timeval *) tv;
376 syscallarg(const void *) tzp; really "const struct timezone *";
377 } */
379 return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
383 settimeofday1(const struct timeval *utv, bool userspace,
384 const void *utzp, struct lwp *l, bool check_kauth)
386 struct timeval atv;
387 struct timespec ts;
388 int error;
390 /* Verify all parameters before changing time. */
393 * NetBSD has no kernel notion of time zone, and only an
394 * obsolete program would try to set it, so we log a warning.
396 if (utzp)
397 log(LOG_WARNING, "pid %d attempted to set the "
398 "(obsolete) kernel time zone\n", l->l_proc->p_pid);
400 if (utv == NULL)
401 return 0;
403 if (userspace) {
404 if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
405 return error;
406 utv = &atv;
409 TIMEVAL_TO_TIMESPEC(utv, &ts);
410 return settime1(l->l_proc, &ts, check_kauth);
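/*
 * Example (userland, illustrative): setting the clock goes through
 * settime1() and its KAUTH_REQ_SYSTEM_TIME_SYSTEM check, so this only
 * succeeds for a suitably privileged process.  Sketch:
 *
 *	#include <sys/time.h>
 *	#include <err.h>
 *
 *	struct timeval tv = { .tv_sec = 1262304000, .tv_usec = 0 };
 *
 *	if (settimeofday(&tv, NULL) == -1)
 *		err(1, "settimeofday");	// typically EPERM if unprivileged
 */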
413 int time_adjusted; /* set if an adjustment is made */
415 /* ARGSUSED */
417 sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
418 register_t *retval)
420 /* {
421 syscallarg(const struct timeval *) delta;
422 syscallarg(struct timeval *) olddelta;
423 } */
424 int error = 0;
425 struct timeval atv, oldatv;
427 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
428 KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
429 return error;
431 if (SCARG(uap, delta)) {
432 error = copyin(SCARG(uap, delta), &atv,
433 sizeof(*SCARG(uap, delta)));
434 if (error)
435 return (error);
437 adjtime1(SCARG(uap, delta) ? &atv : NULL,
438 SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
439 if (SCARG(uap, olddelta))
440 error = copyout(&oldatv, SCARG(uap, olddelta),
441 sizeof(*SCARG(uap, olddelta)));
442 return error;
445 void
446 adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
448 extern int64_t time_adjtime; /* in kern_ntptime.c */
450 if (olddelta) {
451 mutex_spin_enter(&timecounter_lock);
452 olddelta->tv_sec = time_adjtime / 1000000;
453 olddelta->tv_usec = time_adjtime % 1000000;
454 if (olddelta->tv_usec < 0) {
455 olddelta->tv_usec += 1000000;
456 olddelta->tv_sec--;
458 mutex_spin_exit(&timecounter_lock);
461 if (delta) {
462 mutex_spin_enter(&timecounter_lock);
463 time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;
465 if (time_adjtime) {
466 /* We need to save the system time during shutdown */
467 time_adjusted |= 1;
469 mutex_spin_exit(&timecounter_lock);
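/*
 * Example (userland, illustrative): adjtime(2) slews the clock by
 * feeding the requested delta (converted to microseconds above) to the
 * NTP code via time_adjtime instead of stepping it, and reports any
 * still-unapplied previous adjustment.  Sketch:
 *
 *	#include <sys/time.h>
 *	#include <err.h>
 *
 *	struct timeval delta = { .tv_sec = 0, .tv_usec = 500000 };
 *	struct timeval olddelta;
 *
 *	// Requires the KAUTH_SYSTEM_TIME privilege checked in
 *	// sys___adjtime50() above.
 *	if (adjtime(&delta, &olddelta) == -1)
 *		err(1, "adjtime");
 */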
474 * Interval timer support. Both the BSD getitimer() family and the POSIX
475 * timer_*() family of routines are supported.
477 * All timers are kept in an array pointed to by p_timers, which is
478 * allocated on demand - many processes don't use timers at all. The
479 * first three elements in this array are reserved for the BSD timers:
480 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
481 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
482 * syscall.
484 * Realtime timers are kept in the ptimer structure as an absolute
485 * time; virtual time timers are kept as a linked list of deltas.
486 * Virtual time timers are processed in the hardclock() routine of
487 * kern_clock.c. The real time timer is processed by a callout
488 * routine, called from the softclock() routine. Since a callout may
489 * be delayed in real time due to interrupt processing in the system,
490 * it is possible for the real time timeout routine (realtimeexpire,
491 * given below), to be delayed in real time past when it is supposed
492 * to occur. It does not suffice, therefore, to reload the real timer
493 * .it_value from the real time timers .it_interval. Rather, we
494 * compute the next time in absolute time the timer should go off. */
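/*
 * Illustrative layout of the p_timers array described above:
 *
 *	pts->pts_timers[0]	ITIMER_REAL	(BSD setitimer)
 *	pts->pts_timers[1]	ITIMER_VIRTUAL	(BSD setitimer)
 *	pts->pts_timers[2]	ITIMER_PROF	(BSD setitimer)
 *	pts->pts_timers[3..]	POSIX timers from timer_create()
 */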
496 /* Allocate a POSIX realtime timer. */
498 sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
499 register_t *retval)
501 /* {
502 syscallarg(clockid_t) clock_id;
503 syscallarg(struct sigevent *) evp;
504 syscallarg(timer_t *) timerid;
505 } */
507 return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
508 SCARG(uap, evp), copyin, l);
512 timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
513 copyin_t fetch_event, struct lwp *l)
515 int error;
516 timer_t timerid;
517 struct ptimers *pts;
518 struct ptimer *pt;
519 struct proc *p;
521 p = l->l_proc;
523 if (id < CLOCK_REALTIME || id > CLOCK_PROF)
524 return (EINVAL);
526 if ((pts = p->p_timers) == NULL)
527 pts = timers_alloc(p);
529 pt = pool_get(&ptimer_pool, PR_WAITOK);
530 if (evp != NULL) {
531 if (((error =
532 (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
533 ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
534 (pt->pt_ev.sigev_notify > SIGEV_SA)) ||
535 (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
536 (pt->pt_ev.sigev_signo <= 0 ||
537 pt->pt_ev.sigev_signo >= NSIG))) {
538 pool_put(&ptimer_pool, pt);
539 return (error ? error : EINVAL);
543 /* Find a free timer slot, skipping those reserved for setitimer(). */
544 mutex_spin_enter(&timer_lock);
545 for (timerid = 3; timerid < TIMER_MAX; timerid++)
546 if (pts->pts_timers[timerid] == NULL)
547 break;
548 if (timerid == TIMER_MAX) {
549 mutex_spin_exit(&timer_lock);
550 pool_put(&ptimer_pool, pt);
551 return EAGAIN;
553 if (evp == NULL) {
554 pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
555 switch (id) {
556 case CLOCK_REALTIME:
557 pt->pt_ev.sigev_signo = SIGALRM;
558 break;
559 case CLOCK_VIRTUAL:
560 pt->pt_ev.sigev_signo = SIGVTALRM;
561 break;
562 case CLOCK_PROF:
563 pt->pt_ev.sigev_signo = SIGPROF;
564 break;
566 pt->pt_ev.sigev_value.sival_int = timerid;
568 pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
569 pt->pt_info.ksi_errno = 0;
570 pt->pt_info.ksi_code = 0;
571 pt->pt_info.ksi_pid = p->p_pid;
572 pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
573 pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
574 pt->pt_type = id;
575 pt->pt_proc = p;
576 pt->pt_overruns = 0;
577 pt->pt_poverruns = 0;
578 pt->pt_entry = timerid;
579 pt->pt_queued = false;
580 timespecclear(&pt->pt_time.it_value);
581 if (id == CLOCK_REALTIME)
582 callout_init(&pt->pt_ch, 0);
583 else
584 pt->pt_active = 0;
586 pts->pts_timers[timerid] = pt;
587 mutex_spin_exit(&timer_lock);
589 return copyout(&timerid, tid, sizeof(timerid));
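/*
 * Example (userland, illustrative): create a CLOCK_REALTIME timer that
 * delivers SIGALRM, i.e. the same SIGEV_SIGNAL setup that the code
 * above installs by default when no sigevent is supplied.  Sketch:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *	#include <time.h>
 *	#include <err.h>
 *
 *	timer_t tid;
 *	struct sigevent ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sigev_notify = SIGEV_SIGNAL;
 *	ev.sigev_signo = SIGALRM;
 *	ev.sigev_value.sival_int = 1;	// visible to an SA_SIGINFO handler
 *	if (timer_create(CLOCK_REALTIME, &ev, &tid) == -1)
 *		err(1, "timer_create");
 */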
592 /* Delete a POSIX realtime timer */
594 sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
595 register_t *retval)
597 /* {
598 syscallarg(timer_t) timerid;
599 } */
600 struct proc *p = l->l_proc;
601 timer_t timerid;
602 struct ptimers *pts;
603 struct ptimer *pt, *ptn;
605 timerid = SCARG(uap, timerid);
606 pts = p->p_timers;
608 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
609 return (EINVAL);
611 mutex_spin_enter(&timer_lock);
612 if ((pt = pts->pts_timers[timerid]) == NULL) {
613 mutex_spin_exit(&timer_lock);
614 return (EINVAL);
616 if (pt->pt_type != CLOCK_REALTIME) {
617 if (pt->pt_active) {
618 ptn = LIST_NEXT(pt, pt_list);
619 LIST_REMOVE(pt, pt_list);
620 for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
621 timespecadd(&pt->pt_time.it_value,
622 &ptn->pt_time.it_value,
623 &ptn->pt_time.it_value);
624 pt->pt_active = 0;
627 itimerfree(pts, timerid);
629 return (0);
633 * Set up the given timer. The value in pt->pt_time.it_value is taken
634 * to be an absolute time for CLOCK_REALTIME timers and a relative
635 * time for virtual timers.
636 * Must be called at splclock().
638 void
639 timer_settime(struct ptimer *pt)
641 struct ptimer *ptn, *pptn;
642 struct ptlist *ptl;
644 KASSERT(mutex_owned(&timer_lock));
646 if (pt->pt_type == CLOCK_REALTIME) {
647 callout_stop(&pt->pt_ch);
648 if (timespecisset(&pt->pt_time.it_value)) {
650 * Don't need to check tshzto() return value, here.
651 * callout_reset() does it for us.
653 callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
654 realtimerexpire, pt);
656 } else {
657 if (pt->pt_active) {
658 ptn = LIST_NEXT(pt, pt_list);
659 LIST_REMOVE(pt, pt_list);
660 for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
661 timespecadd(&pt->pt_time.it_value,
662 &ptn->pt_time.it_value,
663 &ptn->pt_time.it_value);
665 if (timespecisset(&pt->pt_time.it_value)) {
666 if (pt->pt_type == CLOCK_VIRTUAL)
667 ptl = &pt->pt_proc->p_timers->pts_virtual;
668 else
669 ptl = &pt->pt_proc->p_timers->pts_prof;
671 for (ptn = LIST_FIRST(ptl), pptn = NULL;
672 ptn && timespeccmp(&pt->pt_time.it_value,
673 &ptn->pt_time.it_value, >);
674 pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
675 timespecsub(&pt->pt_time.it_value,
676 &ptn->pt_time.it_value,
677 &pt->pt_time.it_value);
679 if (pptn)
680 LIST_INSERT_AFTER(pptn, pt, pt_list);
681 else
682 LIST_INSERT_HEAD(ptl, pt, pt_list);
684 for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
685 timespecsub(&ptn->pt_time.it_value,
686 &pt->pt_time.it_value,
687 &ptn->pt_time.it_value);
689 pt->pt_active = 1;
690 } else
691 pt->pt_active = 0;
695 void
696 timer_gettime(struct ptimer *pt, struct itimerspec *aits)
698 struct timespec now;
699 struct ptimer *ptn;
701 KASSERT(mutex_owned(&timer_lock));
703 *aits = pt->pt_time;
704 if (pt->pt_type == CLOCK_REALTIME) {
706 * Convert from absolute to relative time in .it_value
707 * part of real time timer. If time for real time
708 * timer has passed return 0, else return difference
709 * between current time and time for the timer to go
710 * off.
712 if (timespecisset(&aits->it_value)) {
713 getnanotime(&now);
714 if (timespeccmp(&aits->it_value, &now, <))
715 timespecclear(&aits->it_value);
716 else
717 timespecsub(&aits->it_value, &now,
718 &aits->it_value);
720 } else if (pt->pt_active) {
721 if (pt->pt_type == CLOCK_VIRTUAL)
722 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
723 else
724 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
725 for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
726 timespecadd(&aits->it_value,
727 &ptn->pt_time.it_value, &aits->it_value);
728 KASSERT(ptn != NULL); /* pt should be findable on the list */
729 } else
730 timespecclear(&aits->it_value);
735 /* Set and arm a POSIX realtime timer */
737 sys___timer_settime50(struct lwp *l,
738 const struct sys___timer_settime50_args *uap,
739 register_t *retval)
741 /* {
742 syscallarg(timer_t) timerid;
743 syscallarg(int) flags;
744 syscallarg(const struct itimerspec *) value;
745 syscallarg(struct itimerspec *) ovalue;
746 } */
747 int error;
748 struct itimerspec value, ovalue, *ovp = NULL;
750 if ((error = copyin(SCARG(uap, value), &value,
751 sizeof(struct itimerspec))) != 0)
752 return (error);
754 if (SCARG(uap, ovalue))
755 ovp = &ovalue;
757 if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
758 SCARG(uap, flags), l->l_proc)) != 0)
759 return error;
761 if (ovp)
762 return copyout(&ovalue, SCARG(uap, ovalue),
763 sizeof(struct itimerspec));
764 return 0;
768 dotimer_settime(int timerid, struct itimerspec *value,
769 struct itimerspec *ovalue, int flags, struct proc *p)
771 struct timespec now;
772 struct itimerspec val, oval;
773 struct ptimers *pts;
774 struct ptimer *pt;
775 int error;
777 pts = p->p_timers;
779 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
780 return EINVAL;
781 val = *value;
782 if ((error = itimespecfix(&val.it_value)) != 0 ||
783 (error = itimespecfix(&val.it_interval)) != 0)
784 return error;
786 mutex_spin_enter(&timer_lock);
787 if ((pt = pts->pts_timers[timerid]) == NULL) {
788 mutex_spin_exit(&timer_lock);
789 return EINVAL;
792 oval = pt->pt_time;
793 pt->pt_time = val;
796 * If we've been passed a relative time for a realtime timer,
797 * convert it to absolute; if an absolute time for a virtual
798 * timer, convert it to relative and make sure we don't set it
799 * to zero, which would cancel the timer, or let it go
800 * negative, which would confuse the comparison tests.
802 if (timespecisset(&pt->pt_time.it_value)) {
803 if (pt->pt_type == CLOCK_REALTIME) {
804 if ((flags & TIMER_ABSTIME) == 0) {
805 getnanotime(&now);
806 timespecadd(&pt->pt_time.it_value, &now,
807 &pt->pt_time.it_value);
809 } else {
810 if ((flags & TIMER_ABSTIME) != 0) {
811 getnanotime(&now);
812 timespecsub(&pt->pt_time.it_value, &now,
813 &pt->pt_time.it_value);
814 if (!timespecisset(&pt->pt_time.it_value) ||
815 pt->pt_time.it_value.tv_sec < 0) {
816 pt->pt_time.it_value.tv_sec = 0;
817 pt->pt_time.it_value.tv_nsec = 1;
823 timer_settime(pt);
824 mutex_spin_exit(&timer_lock);
826 if (ovalue)
827 *ovalue = oval;
829 return (0);
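/*
 * Example (userland, illustrative): arm a timer obtained from
 * timer_create(2) as in the earlier sketch.  Without TIMER_ABSTIME the
 * value is relative and dotimer_settime() converts it to an absolute
 * expiry; a non-zero it_interval makes the timer periodic.  Sketch:
 *
 *	#include <time.h>
 *	#include <err.h>
 *
 *	struct itimerspec its;
 *
 *	its.it_value.tv_sec = 1;		// first expiry in 1s
 *	its.it_value.tv_nsec = 0;
 *	its.it_interval.tv_sec = 0;		// then every 250ms
 *	its.it_interval.tv_nsec = 250000000;
 *	if (timer_settime(tid, 0, &its, NULL) == -1)
 *		err(1, "timer_settime");
 */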
832 /* Return the time remaining until a POSIX timer fires. */
834 sys___timer_gettime50(struct lwp *l,
835 const struct sys___timer_gettime50_args *uap, register_t *retval)
837 /* {
838 syscallarg(timer_t) timerid;
839 syscallarg(struct itimerspec *) value;
840 } */
841 struct itimerspec its;
842 int error;
844 if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
845 &its)) != 0)
846 return error;
848 return copyout(&its, SCARG(uap, value), sizeof(its));
852 dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
854 struct ptimer *pt;
855 struct ptimers *pts;
857 pts = p->p_timers;
858 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
859 return (EINVAL);
860 mutex_spin_enter(&timer_lock);
861 if ((pt = pts->pts_timers[timerid]) == NULL) {
862 mutex_spin_exit(&timer_lock);
863 return (EINVAL);
865 timer_gettime(pt, its);
866 mutex_spin_exit(&timer_lock);
868 return 0;
872 * Return the count of the number of times a periodic timer expired
873 * while a notification was already pending. The counter is reset when
874 * a timer expires and a notification can be posted.
877 sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
878 register_t *retval)
880 /* {
881 syscallarg(timer_t) timerid;
882 } */
883 struct proc *p = l->l_proc;
884 struct ptimers *pts;
885 int timerid;
886 struct ptimer *pt;
888 timerid = SCARG(uap, timerid);
890 pts = p->p_timers;
891 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
892 return (EINVAL);
893 mutex_spin_enter(&timer_lock);
894 if ((pt = pts->pts_timers[timerid]) == NULL) {
895 mutex_spin_exit(&timer_lock);
896 return (EINVAL);
898 *retval = pt->pt_poverruns;
899 mutex_spin_exit(&timer_lock);
901 return (0);
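/*
 * Example (userland, illustrative): after a timer signal is delivered,
 * timer_getoverrun() returns the pt_poverruns count read above, i.e.
 * how many extra expirations were merged into that one delivery.
 * Sketch, assuming a global timer_t tid:
 *
 *	#include <time.h>
 *
 *	static void
 *	handler(int sig)
 *	{
 *		int missed;
 *
 *		(void)sig;
 *		missed = timer_getoverrun(tid);
 *		// "missed" expirations fired while the signal was pending
 *	}
 */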
904 #ifdef KERN_SA
905 /* Glue function that triggers an upcall; called from userret(). */
906 void
907 timerupcall(struct lwp *l)
909 struct ptimers *pt = l->l_proc->p_timers;
910 struct proc *p = l->l_proc;
911 unsigned int i, fired, done;
913 KDASSERT(l->l_proc->p_sa);
914 /* Bail out if we do not own the virtual processor */
915 if (l->l_savp->savp_lwp != l)
916 return ;
918 mutex_enter(p->p_lock);
920 fired = pt->pts_fired;
921 done = 0;
922 while ((i = ffs(fired)) != 0) {
923 siginfo_t *si;
924 int mask = 1 << --i;
925 int f;
927 f = ~l->l_pflag & LP_SA_NOBLOCK;
928 l->l_pflag |= LP_SA_NOBLOCK;
929 si = siginfo_alloc(PR_WAITOK);
930 si->_info = pt->pts_timers[i]->pt_info.ksi_info;
931 if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
932 sizeof(*si), si, siginfo_free) != 0) {
933 siginfo_free(si);
934 /* XXX What do we do here?? */
935 } else
936 done |= mask;
937 fired &= ~mask;
938 l->l_pflag ^= f;
940 pt->pts_fired &= ~done;
941 if (pt->pts_fired == 0)
942 l->l_proc->p_timerpend = 0;
944 mutex_exit(p->p_lock);
946 #endif /* KERN_SA */
949 * Real interval timer expired:
950 * send process whose timer expired an alarm signal.
951 * If time is not set up to reload, then just return.
952 * Else compute next time timer should go off which is > current time.
953 * This is where delay in processing this timeout causes multiple
954 * SIGALRM calls to be compressed into one.
956 void
957 realtimerexpire(void *arg)
959 uint64_t last_val, next_val, interval, now_ms;
960 struct timespec now, next;
961 struct ptimer *pt;
962 int backwards;
964 pt = arg;
966 mutex_spin_enter(&timer_lock);
967 itimerfire(pt);
969 if (!timespecisset(&pt->pt_time.it_interval)) {
970 timespecclear(&pt->pt_time.it_value);
971 mutex_spin_exit(&timer_lock);
972 return;
975 getnanotime(&now);
976 backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
977 timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
978 /* Handle the easy case of non-overflown timers first. */
979 if (!backwards && timespeccmp(&next, &now, >)) {
980 pt->pt_time.it_value = next;
981 } else {
982 now_ms = timespec2ns(&now);
983 last_val = timespec2ns(&pt->pt_time.it_value);
984 interval = timespec2ns(&pt->pt_time.it_interval);
986 next_val = now_ms +
987 (now_ms - last_val + interval - 1) % interval;
989 if (backwards)
990 next_val += interval;
991 else
992 pt->pt_overruns += (now_ms - last_val) / interval;
994 pt->pt_time.it_value.tv_sec = next_val / 1000000000;
995 pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
999 * Don't need to check tshzto() return value, here.
1000 * callout_reset() does it for us.
1002 callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
1003 realtimerexpire, pt);
1004 mutex_spin_exit(&timer_lock);
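/*
 * Worked example for the overrun branch above (nanosecond values kept
 * small for readability, backwards == 0): with last_val = 10,
 * interval = 5 and now_ms = 23, expirations at 15 and 20 were missed,
 * so pt_overruns grows by (23 - 10) / 5 = 2 and the next expiry is
 * 23 + (23 - 10 + 5 - 1) % 5 = 23 + 2 = 25, the first point on the
 * interval grid (counted from last_val) that lies after "now".
 */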
1007 /* BSD routine to get the value of an interval timer. */
1008 /* ARGSUSED */
1010 sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
1011 register_t *retval)
1013 /* {
1014 syscallarg(int) which;
1015 syscallarg(struct itimerval *) itv;
1016 } */
1017 struct proc *p = l->l_proc;
1018 struct itimerval aitv;
1019 int error;
1021 error = dogetitimer(p, SCARG(uap, which), &aitv);
1022 if (error)
1023 return error;
1024 return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
1028 dogetitimer(struct proc *p, int which, struct itimerval *itvp)
1030 struct ptimers *pts;
1031 struct ptimer *pt;
1032 struct itimerspec its;
1034 if ((u_int)which > ITIMER_PROF)
1035 return (EINVAL);
1037 mutex_spin_enter(&timer_lock);
1038 pts = p->p_timers;
1039 if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
1040 timerclear(&itvp->it_value);
1041 timerclear(&itvp->it_interval);
1042 } else {
1043 timer_gettime(pt, &its);
1044 TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
1045 TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
1047 mutex_spin_exit(&timer_lock);
1049 return 0;
1052 /* BSD routine to set/arm an interval timer. */
1053 /* ARGSUSED */
1055 sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
1056 register_t *retval)
1058 /* {
1059 syscallarg(int) which;
1060 syscallarg(const struct itimerval *) itv;
1061 syscallarg(struct itimerval *) oitv;
1062 } */
1063 struct proc *p = l->l_proc;
1064 int which = SCARG(uap, which);
1065 struct sys___getitimer50_args getargs;
1066 const struct itimerval *itvp;
1067 struct itimerval aitv;
1068 int error;
1070 if ((u_int)which > ITIMER_PROF)
1071 return (EINVAL);
1072 itvp = SCARG(uap, itv);
1073 if (itvp &&
1074 (error = copyin(itvp, &aitv, sizeof(struct itimerval)) != 0))
1075 return (error);
1076 if (SCARG(uap, oitv) != NULL) {
1077 SCARG(&getargs, which) = which;
1078 SCARG(&getargs, itv) = SCARG(uap, oitv);
1079 if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
1080 return (error);
1082 if (itvp == 0)
1083 return (0);
1085 return dosetitimer(p, which, &aitv);
1089 dosetitimer(struct proc *p, int which, struct itimerval *itvp)
1091 struct timespec now;
1092 struct ptimers *pts;
1093 struct ptimer *pt, *spare;
1095 if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
1096 return (EINVAL);
1099 * Don't bother allocating data structures if the process just
1100 * wants to clear the timer.
1102 spare = NULL;
1103 pts = p->p_timers;
1104 retry:
1105 if (!timerisset(&itvp->it_value) && (pts == NULL ||
1106 pts->pts_timers[which] == NULL))
1107 return (0);
1108 if (pts == NULL)
1109 pts = timers_alloc(p);
1110 mutex_spin_enter(&timer_lock);
1111 pt = pts->pts_timers[which];
1112 if (pt == NULL) {
1113 if (spare == NULL) {
1114 mutex_spin_exit(&timer_lock);
1115 spare = pool_get(&ptimer_pool, PR_WAITOK);
1116 goto retry;
1118 pt = spare;
1119 spare = NULL;
1120 pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
1121 pt->pt_ev.sigev_value.sival_int = which;
1122 pt->pt_overruns = 0;
1123 pt->pt_proc = p;
1124 pt->pt_type = which;
1125 pt->pt_entry = which;
1126 pt->pt_queued = false;
1127 if (pt->pt_type == CLOCK_REALTIME)
1128 callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
1129 else
1130 pt->pt_active = 0;
1132 switch (which) {
1133 case ITIMER_REAL:
1134 pt->pt_ev.sigev_signo = SIGALRM;
1135 break;
1136 case ITIMER_VIRTUAL:
1137 pt->pt_ev.sigev_signo = SIGVTALRM;
1138 break;
1139 case ITIMER_PROF:
1140 pt->pt_ev.sigev_signo = SIGPROF;
1141 break;
1143 pts->pts_timers[which] = pt;
1146 TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
1147 TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);
1149 if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) {
1150 /* Convert to absolute time */
1151 /* XXX need to wrap in splclock for timecounters case? */
1152 getnanotime(&now);
1153 timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
1155 timer_settime(pt);
1156 mutex_spin_exit(&timer_lock);
1157 if (spare != NULL)
1158 pool_put(&ptimer_pool, spare);
1160 return (0);
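/*
 * Example (userland, illustrative): the classic BSD interface.  This
 * arms ITIMER_REAL, which dosetitimer() converts above to an absolute
 * CLOCK_REALTIME expiry; SIGALRM is delivered on each expiration.
 * Sketch:
 *
 *	#include <sys/time.h>
 *	#include <err.h>
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = 2;	// first SIGALRM after 2s
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval.tv_sec = 1;	// then once per second
 *	itv.it_interval.tv_usec = 0;
 *	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
 *		err(1, "setitimer");
 */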
1163 /* Utility routines to manage the array of pointers to timers. */
1164 struct ptimers *
1165 timers_alloc(struct proc *p)
1167 struct ptimers *pts;
1168 int i;
1170 pts = pool_get(&ptimers_pool, PR_WAITOK);
1171 LIST_INIT(&pts->pts_virtual);
1172 LIST_INIT(&pts->pts_prof);
1173 for (i = 0; i < TIMER_MAX; i++)
1174 pts->pts_timers[i] = NULL;
1175 pts->pts_fired = 0;
1176 mutex_spin_enter(&timer_lock);
1177 if (p->p_timers == NULL) {
1178 p->p_timers = pts;
1179 mutex_spin_exit(&timer_lock);
1180 return pts;
1182 mutex_spin_exit(&timer_lock);
1183 pool_put(&ptimers_pool, pts);
1184 return p->p_timers;
1188 * Clean up the per-process timers. If "which" is set to TIMERS_ALL,
1189 * then clean up all timers and free all the data structures. If
1190 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
1191 * by timer_create(), not the BSD setitimer() timers, and only free the
1192 * structure if none of those remain.
1194 void
1195 timers_free(struct proc *p, int which)
1197 struct ptimers *pts;
1198 struct ptimer *ptn;
1199 struct timespec ts;
1200 int i;
1202 if (p->p_timers == NULL)
1203 return;
1205 pts = p->p_timers;
1206 mutex_spin_enter(&timer_lock);
1207 if (which == TIMERS_ALL) {
1208 p->p_timers = NULL;
1209 i = 0;
1210 } else {
1211 timespecclear(&ts);
1212 for (ptn = LIST_FIRST(&pts->pts_virtual);
1213 ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
1214 ptn = LIST_NEXT(ptn, pt_list)) {
1215 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1216 timespecadd(&ts, &ptn->pt_time.it_value, &ts);
1218 LIST_FIRST(&pts->pts_virtual) = NULL;
1219 if (ptn) {
1220 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1221 timespecadd(&ts, &ptn->pt_time.it_value,
1222 &ptn->pt_time.it_value);
1223 LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
1225 timespecclear(&ts);
1226 for (ptn = LIST_FIRST(&pts->pts_prof);
1227 ptn && ptn != pts->pts_timers[ITIMER_PROF];
1228 ptn = LIST_NEXT(ptn, pt_list)) {
1229 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1230 timespecadd(&ts, &ptn->pt_time.it_value, &ts);
1232 LIST_FIRST(&pts->pts_prof) = NULL;
1233 if (ptn) {
1234 KASSERT(ptn->pt_type != CLOCK_REALTIME);
1235 timespecadd(&ts, &ptn->pt_time.it_value,
1236 &ptn->pt_time.it_value);
1237 LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
1239 i = 3;
1241 for ( ; i < TIMER_MAX; i++) {
1242 if (pts->pts_timers[i] != NULL) {
1243 itimerfree(pts, i);
1244 mutex_spin_enter(&timer_lock);
1247 if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
1248 pts->pts_timers[2] == NULL) {
1249 p->p_timers = NULL;
1250 mutex_spin_exit(&timer_lock);
1251 pool_put(&ptimers_pool, pts);
1252 } else
1253 mutex_spin_exit(&timer_lock);
1256 static void
1257 itimerfree(struct ptimers *pts, int index)
1259 struct ptimer *pt;
1261 KASSERT(mutex_owned(&timer_lock));
1263 pt = pts->pts_timers[index];
1264 pts->pts_timers[index] = NULL;
1265 if (pt->pt_type == CLOCK_REALTIME)
1266 callout_halt(&pt->pt_ch, &timer_lock);
1267 else if (pt->pt_queued)
1268 TAILQ_REMOVE(&timer_queue, pt, pt_chain);
1269 mutex_spin_exit(&timer_lock);
1270 if (pt->pt_type == CLOCK_REALTIME)
1271 callout_destroy(&pt->pt_ch);
1272 pool_put(&ptimer_pool, pt);
1276 * Decrement an interval timer by a specified number
1277 * of nanoseconds, which must be less than a second,
1278 * i.e. < 1000000000. If the timer expires, then reload
1279 * it. In this case, carry over (nsec - old value) to
1280 * reduce the value reloaded into the timer so that
1281 * the timer does not drift. This routine assumes
1282 * that it is called in a context where the timers
1283 * on which it is operating cannot change in value.
1285 static int
1286 itimerdecr(struct ptimer *pt, int nsec)
1288 struct itimerspec *itp;
1290 KASSERT(mutex_owned(&timer_lock));
1292 itp = &pt->pt_time;
1293 if (itp->it_value.tv_nsec < nsec) {
1294 if (itp->it_value.tv_sec == 0) {
1295 /* expired, and already in next interval */
1296 nsec -= itp->it_value.tv_nsec;
1297 goto expire;
1299 itp->it_value.tv_nsec += 1000000000;
1300 itp->it_value.tv_sec--;
1302 itp->it_value.tv_nsec -= nsec;
1303 nsec = 0;
1304 if (timespecisset(&itp->it_value))
1305 return (1);
1306 /* expired, exactly at end of interval */
1307 expire:
1308 if (timespecisset(&itp->it_interval)) {
1309 itp->it_value = itp->it_interval;
1310 itp->it_value.tv_nsec -= nsec;
1311 if (itp->it_value.tv_nsec < 0) {
1312 itp->it_value.tv_nsec += 1000000000;
1313 itp->it_value.tv_sec--;
1315 timer_settime(pt);
1316 } else
1317 itp->it_value.tv_nsec = 0; /* sec is already 0 */
1318 return (0);
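/*
 * Worked example for itimerdecr() above: with HZ = 100 each call
 * subtracts nsec = 10000000 (one 10 ms tick).  If it_value is
 * 0.000000400s the timer has expired and is already
 * 10000000 - 400 = 9999600 ns into the next period, so the reload is
 * it_interval minus 9999600 ns; carrying that remainder over is what
 * keeps a periodic virtual timer from drifting by a fraction of a
 * tick at every expiry.
 */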
1321 static void
1322 itimerfire(struct ptimer *pt)
1325 KASSERT(mutex_owned(&timer_lock));
1328 * XXX Can overrun, but we don't do signal queueing yet, anyway.
1329 * XXX Relying on the clock interrupt is stupid.
1331 if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
1332 (pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
1333 pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
1334 return;
1335 TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
1336 pt->pt_queued = true;
1337 softint_schedule(timer_sih);
1340 void
1341 timer_tick(lwp_t *l, bool user)
1343 struct ptimers *pts;
1344 struct ptimer *pt;
1345 proc_t *p;
1347 p = l->l_proc;
1348 if (p->p_timers == NULL)
1349 return;
1351 mutex_spin_enter(&timer_lock);
1352 if ((pts = l->l_proc->p_timers) != NULL) {
1354 * Run current process's virtual and profile time, as needed.
1356 if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
1357 if (itimerdecr(pt, tick * 1000) == 0)
1358 itimerfire(pt);
1359 if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
1360 if (itimerdecr(pt, tick * 1000) == 0)
1361 itimerfire(pt);
1363 mutex_spin_exit(&timer_lock);
1366 #ifdef KERN_SA
1368 * timer_sa_intr:
1370 * SIGEV_SA handling for timer_intr(). We are called (and return)
1371 * with the timer lock held. We know that the process had SA enabled
1372 * when this timer was enqueued. As timer_intr() is a soft interrupt
1373 * handler, SA should still be enabled by the time we get here.
1375 static void
1376 timer_sa_intr(struct ptimer *pt, proc_t *p)
1378 unsigned int i;
1379 struct sadata *sa;
1380 struct sadata_vp *vp;
1382 /* Cause the process to generate an upcall when it returns. */
1383 if (!p->p_timerpend) {
1385 * XXX stop signals can be processed inside tsleep,
1386 * which can be inside sa_yield's inner loop, which
1387 * makes testing for sa_idle alone insufficient to
1388 * determine if we really should call setrunnable.
1390 pt->pt_poverruns = pt->pt_overruns;
1391 pt->pt_overruns = 0;
1392 i = 1 << pt->pt_entry;
1393 p->p_timers->pts_fired = i;
1394 p->p_timerpend = 1;
1396 sa = p->p_sa;
1397 mutex_enter(&sa->sa_mutex);
1398 SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
1399 struct lwp *vp_lwp = vp->savp_lwp;
1400 lwp_lock(vp_lwp);
1401 lwp_need_userret(vp_lwp);
1402 if (vp_lwp->l_flag & LW_SA_IDLE) {
1403 vp_lwp->l_flag &= ~LW_SA_IDLE;
1404 lwp_unsleep(vp_lwp, true);
1405 break;
1407 lwp_unlock(vp_lwp);
1409 mutex_exit(&sa->sa_mutex);
1410 } else {
1411 i = 1 << pt->pt_entry;
1412 if ((p->p_timers->pts_fired & i) == 0) {
1413 pt->pt_poverruns = pt->pt_overruns;
1414 pt->pt_overruns = 0;
1415 p->p_timers->pts_fired |= i;
1416 } else
1417 pt->pt_overruns++;
1420 #endif /* KERN_SA */
1422 static void
1423 timer_intr(void *cookie)
1425 ksiginfo_t ksi;
1426 struct ptimer *pt;
1427 proc_t *p;
1429 mutex_enter(proc_lock);
1430 mutex_spin_enter(&timer_lock);
1431 while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
1432 TAILQ_REMOVE(&timer_queue, pt, pt_chain);
1433 KASSERT(pt->pt_queued);
1434 pt->pt_queued = false;
1436 if (pt->pt_proc->p_timers == NULL) {
1437 /* Process is dying. */
1438 continue;
1440 p = pt->pt_proc;
1441 #ifdef KERN_SA
1442 if (pt->pt_ev.sigev_notify == SIGEV_SA) {
1443 timer_sa_intr(pt, p);
1444 continue;
1446 #endif /* KERN_SA */
1447 if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
1448 continue;
1449 if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
1450 pt->pt_overruns++;
1451 continue;
1454 KSI_INIT(&ksi);
1455 ksi.ksi_signo = pt->pt_ev.sigev_signo;
1456 ksi.ksi_code = SI_TIMER;
1457 ksi.ksi_value = pt->pt_ev.sigev_value;
1458 pt->pt_poverruns = pt->pt_overruns;
1459 pt->pt_overruns = 0;
1460 mutex_spin_exit(&timer_lock);
1461 kpsignal(p, &ksi, NULL);
1462 mutex_spin_enter(&timer_lock);
1464 mutex_spin_exit(&timer_lock);
1465 mutex_exit(proc_lock);
1469 * Check if the time will wrap if set to ts.
1471 * ts - timespec describing the new time
1472 * delta - the delta between the current time and ts
1474 bool
1475 time_wraps(struct timespec *ts, struct timespec *delta)
1479 * Don't allow the time to be set forward so far it
1480 * will wrap and become negative, thus allowing an
1481 * attacker to bypass the next check below. The
1482 * cutoff is 1 year before rollover occurs, so even
1483 * if the attacker uses adjtime(2) to move the time
1484 * past the cutoff, it will take a very long time
1485 * to get to the wrap point.
1487 if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
1488 (delta->tv_sec < 0 || delta->tv_nsec < 0))
1489 return true;
1491 return false;