4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
31 #include <sys/timer.h>
32 #include <sys/systm.h>
33 #include <sys/param.h>
35 #include <sys/debug.h>
36 #include <sys/policy.h>
37 #include <sys/port_impl.h>
38 #include <sys/port_kernel.h>
39 #include <sys/contract/process_impl.h>
41 static kmem_cache_t
*clock_timer_cache
;
42 static clock_backend_t
*clock_backend
[CLOCK_MAX
];
43 static int timer_port_callback(void *, int *, pid_t
, int, void *);
44 static void timer_close_port(void *, int, pid_t
, int);
/*
 * Map a clockid_t to its registered backend; evaluates to NULL for an
 * out-of-range clock ID or a clock with no backend installed.
 */
#define	CLOCK_BACKEND(clk) \
	((clk) < CLOCK_MAX && (clk) >= 0 ? clock_backend[(clk)] : NULL)
50 * Tunable to increase the maximum number of POSIX timers per-process. This
51 * may _only_ be tuned in /etc/system or by patching the kernel binary; it
52 * _cannot_ be tuned on a running system.
54 int timer_max
= _TIMER_MAX
;
57 * timer_lock() locks the specified interval timer. It doesn't look at the
58 * ITLK_REMOVE bit; it's up to callers to look at this if they need to
59 * care. p_lock must be held on entry; it may be dropped and reaquired,
60 * but timer_lock() will always return with p_lock held.
62 * Note that timer_create() doesn't call timer_lock(); it creates timers
63 * with the ITLK_LOCKED bit explictly set.
66 timer_lock(proc_t
*p
, itimer_t
*it
)
68 ASSERT(MUTEX_HELD(&p
->p_lock
));
70 while (it
->it_lock
& ITLK_LOCKED
) {
72 cv_wait(&it
->it_cv
, &p
->p_lock
);
76 it
->it_lock
|= ITLK_LOCKED
;
80 * timer_unlock() unlocks the specified interval timer, waking up any
81 * waiters. p_lock must be held on entry; it will not be dropped by
85 timer_unlock(proc_t
*p
, itimer_t
*it
)
87 ASSERT(MUTEX_HELD(&p
->p_lock
));
88 ASSERT(it
->it_lock
& ITLK_LOCKED
);
89 it
->it_lock
&= ~ITLK_LOCKED
;
90 cv_signal(&it
->it_cv
);
94 * timer_delete_locked() takes a proc pointer, timer ID and locked interval
95 * timer, and deletes the specified timer. It must be called with p_lock
96 * held, and cannot be called on a timer which already has ITLK_REMOVE set;
97 * the caller must check this. timer_delete_locked() will set the ITLK_REMOVE
98 * bit and will iteratively unlock and lock the interval timer until all
99 * blockers have seen the ITLK_REMOVE and cleared out. It will then zero
100 * out the specified entry in the p_itimer array, and call into the clock
101 * backend to complete the deletion.
103 * This function will always return with p_lock held.
106 timer_delete_locked(proc_t
*p
, timer_t tid
, itimer_t
*it
)
108 ASSERT(MUTEX_HELD(&p
->p_lock
));
109 ASSERT(!(it
->it_lock
& ITLK_REMOVE
));
110 ASSERT(it
->it_lock
& ITLK_LOCKED
);
112 it
->it_lock
|= ITLK_REMOVE
;
115 * If there are threads waiting to lock this timer, we'll unlock
116 * the timer, and block on the cv. Threads blocking our removal will
117 * have the opportunity to run; when they see the ITLK_REMOVE flag
118 * set, they will immediately unlock the timer.
120 while (it
->it_blockers
) {
122 cv_wait(&it
->it_cv
, &p
->p_lock
);
126 ASSERT(p
->p_itimer
[tid
] == it
);
127 p
->p_itimer
[tid
] = NULL
;
130 * No one is blocked on this timer, and no one will be (we've set
131 * p_itimer[tid] to be NULL; no one can find it). Now we call into
132 * the clock backend to delete the timer; it is up to the backend to
133 * guarantee that timer_fire() has completed (and will never again
134 * be called) for this timer.
136 mutex_exit(&p
->p_lock
);
138 it
->it_backend
->clk_timer_delete(it
);
141 mutex_enter(&it
->it_mutex
);
144 /* dissociate timer from the event port */
145 (void) port_dissociate_ksource(it
->it_portfd
,
146 PORT_SOURCE_TIMER
, (port_source_t
*)it
->it_portsrc
);
147 pev
= (port_kevent_t
*)it
->it_portev
;
148 it
->it_portev
= NULL
;
149 it
->it_flags
&= ~IT_PORT
;
151 mutex_exit(&it
->it_mutex
);
152 (void) port_remove_done_event(pev
);
153 port_free_event(pev
);
155 mutex_exit(&it
->it_mutex
);
159 mutex_enter(&p
->p_lock
);
162 * We need to be careful freeing the sigqueue for this timer;
163 * if a signal is pending, the sigqueue needs to be freed
164 * synchronously in siginfofree(). The need to free the sigqueue
165 * in siginfofree() is indicated by setting sq_func to NULL.
167 if (it
->it_pending
> 0) {
168 it
->it_sigq
->sq_func
= NULL
;
170 kmem_free(it
->it_sigq
, sizeof (sigqueue_t
));
173 ASSERT(it
->it_blockers
== 0);
174 kmem_cache_free(clock_timer_cache
, it
);
178 * timer_grab() and its companion routine, timer_release(), are wrappers
179 * around timer_lock()/_unlock() which allow the timer_*(3R) routines to
180 * (a) share error handling code and (b) not grab p_lock themselves. Routines
181 * which are called with p_lock held (e.g. timer_lwpbind(), timer_lwpexit())
182 * must call timer_lock()/_unlock() explictly.
184 * timer_grab() takes a proc and a timer ID, and returns a pointer to a
185 * locked interval timer. p_lock must _not_ be held on entry; timer_grab()
186 * may acquire p_lock, but will always return with p_lock dropped.
188 * If timer_grab() fails, it will return NULL. timer_grab() will fail if
189 * one or more of the following is true:
191 * (a) The specified timer ID is out of range.
193 * (b) The specified timer ID does not correspond to a timer ID returned
194 * from timer_create(3R).
196 * (c) The specified timer ID is currently being removed.
200 timer_grab(proc_t
*p
, timer_t tid
)
204 if (tid
>= timer_max
|| tid
< 0)
207 mutex_enter(&p
->p_lock
);
209 if ((itp
= p
->p_itimer
) == NULL
|| (it
= itp
[tid
]) == NULL
) {
210 mutex_exit(&p
->p_lock
);
216 if (it
->it_lock
& ITLK_REMOVE
) {
218 * Someone is removing this timer; it will soon be invalid.
221 mutex_exit(&p
->p_lock
);
225 mutex_exit(&p
->p_lock
);
231 * timer_release() releases a timer acquired with timer_grab(). p_lock
232 * should not be held on entry; timer_release() will acquire p_lock but
233 * will drop it before returning.
236 timer_release(proc_t
*p
, itimer_t
*it
)
238 mutex_enter(&p
->p_lock
);
240 mutex_exit(&p
->p_lock
);
244 * timer_delete_grabbed() deletes a timer acquired with timer_grab().
245 * p_lock should not be held on entry; timer_delete_grabbed() will acquire
246 * p_lock, but will drop it before returning.
249 timer_delete_grabbed(proc_t
*p
, timer_t tid
, itimer_t
*it
)
251 mutex_enter(&p
->p_lock
);
252 timer_delete_locked(p
, tid
, it
);
253 mutex_exit(&p
->p_lock
);
259 clock_timer_cache
= kmem_cache_create("timer_cache",
260 sizeof (itimer_t
), 0, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
264 clock_add_backend(clockid_t clock
, clock_backend_t
*backend
)
266 ASSERT(clock
>= 0 && clock
< CLOCK_MAX
);
267 ASSERT(clock_backend
[clock
] == NULL
);
269 clock_backend
[clock
] = backend
;
273 clock_get_backend(clockid_t clock
)
275 if (clock
< 0 || clock
>= CLOCK_MAX
)
278 return (clock_backend
[clock
]);
282 clock_settime(clockid_t clock
, timespec_t
*tp
)
285 clock_backend_t
*backend
;
288 if ((backend
= CLOCK_BACKEND(clock
)) == NULL
)
289 return (set_errno(EINVAL
));
291 if (secpolicy_settime(CRED()) != 0)
292 return (set_errno(EPERM
));
294 if (get_udatamodel() == DATAMODEL_NATIVE
) {
295 if (copyin(tp
, &t
, sizeof (timespec_t
)) != 0)
296 return (set_errno(EFAULT
));
300 if (copyin(tp
, &t32
, sizeof (timespec32_t
)) != 0)
301 return (set_errno(EFAULT
));
303 TIMESPEC32_TO_TIMESPEC(&t
, &t32
);
306 if (itimerspecfix(&t
))
307 return (set_errno(EINVAL
));
309 error
= backend
->clk_clock_settime(&t
);
312 return (set_errno(error
));
318 clock_gettime(clockid_t clock
, timespec_t
*tp
)
321 clock_backend_t
*backend
;
324 if ((backend
= CLOCK_BACKEND(clock
)) == NULL
)
325 return (set_errno(EINVAL
));
327 error
= backend
->clk_clock_gettime(&t
);
330 return (set_errno(error
));
332 if (get_udatamodel() == DATAMODEL_NATIVE
) {
333 if (copyout(&t
, tp
, sizeof (timespec_t
)) != 0)
334 return (set_errno(EFAULT
));
338 if (TIMESPEC_OVERFLOW(&t
))
339 return (set_errno(EOVERFLOW
));
340 TIMESPEC_TO_TIMESPEC32(&t32
, &t
);
342 if (copyout(&t32
, tp
, sizeof (timespec32_t
)) != 0)
343 return (set_errno(EFAULT
));
350 clock_getres(clockid_t clock
, timespec_t
*tp
)
353 clock_backend_t
*backend
;
357 * Strangely, the standard defines clock_getres() with a NULL tp
358 * to do nothing (regardless of the validity of the specified
359 * clock_id). Go figure.
364 if ((backend
= CLOCK_BACKEND(clock
)) == NULL
)
365 return (set_errno(EINVAL
));
367 error
= backend
->clk_clock_getres(&t
);
370 return (set_errno(error
));
372 if (get_udatamodel() == DATAMODEL_NATIVE
) {
373 if (copyout(&t
, tp
, sizeof (timespec_t
)) != 0)
374 return (set_errno(EFAULT
));
378 if (TIMESPEC_OVERFLOW(&t
))
379 return (set_errno(EOVERFLOW
));
380 TIMESPEC_TO_TIMESPEC32(&t32
, &t
);
382 if (copyout(&t32
, tp
, sizeof (timespec32_t
)) != 0)
383 return (set_errno(EFAULT
));
390 timer_signal(sigqueue_t
*sigq
)
392 itimer_t
*it
= (itimer_t
*)sigq
->sq_backptr
;
395 * There are some conditions during a fork or an exit when we can
396 * call siginfofree() without p_lock held. To prevent a race
397 * between timer_signal() and timer_fire() with regard to it_pending,
398 * we therefore acquire it_mutex in both paths.
400 mutex_enter(&it
->it_mutex
);
401 ASSERT(it
->it_pending
> 0);
402 it
->it_overrun
= it
->it_pending
- 1;
404 mutex_exit(&it
->it_mutex
);
408 * This routine is called from the clock backend.
411 timer_fire(itimer_t
*it
)
416 if (it
->it_flags
& IT_SIGNAL
) {
418 * See the comment in timer_signal() for why it is not
419 * sufficient to only grab p_lock here. Because p_lock can be
420 * held on entry to timer_signal(), the lock ordering is
421 * necessarily p_lock before it_mutex.
426 mutex_enter(&p
->p_lock
);
430 * If a timer was ever programmed to send events to a port,
431 * the IT_PORT flag will remain set until:
432 * a) the timer is deleted (see timer_delete_locked()) or
433 * b) the port is being closed (see timer_close_port()).
434 * Both cases are synchronized with the it_mutex.
435 * We don't need to use the p_lock because it is only
436 * required in the IT_SIGNAL case.
437 * If IT_PORT was set and the port is being closed then
438 * the timer notification is set to NONE. In such a case
439 * the timer itself and the it_pending counter remain active
440 * until the application deletes the counter or the process
445 mutex_enter(&it
->it_mutex
);
447 if (it
->it_pending
> 0) {
448 if (it
->it_pending
< INT_MAX
)
450 mutex_exit(&it
->it_mutex
);
452 if (it
->it_flags
& IT_PORT
) {
454 port_send_event((port_kevent_t
*)it
->it_portev
);
455 mutex_exit(&it
->it_mutex
);
456 } else if (it
->it_flags
& IT_SIGNAL
) {
458 mutex_exit(&it
->it_mutex
);
459 sigaddqa(p
, NULL
, it
->it_sigq
);
461 mutex_exit(&it
->it_mutex
);
466 mutex_exit(&p
->p_lock
);
470 timer_create(clockid_t clock
, struct sigevent
*evp
, timer_t
*tid
)
474 clock_backend_t
*backend
;
480 port_notify_t tim_pnevp
;
481 port_kevent_t
*pkevp
= NULL
;
483 if ((backend
= CLOCK_BACKEND(clock
)) == NULL
)
484 return (set_errno(EINVAL
));
488 * short copyin() for binary compatibility
489 * fetch oldsigevent to determine how much to copy in.
491 if (get_udatamodel() == DATAMODEL_NATIVE
) {
492 if (copyin(evp
, &ev
, sizeof (struct oldsigevent
)))
493 return (set_errno(EFAULT
));
495 if (ev
.sigev_notify
== SIGEV_PORT
||
496 ev
.sigev_notify
== SIGEV_THREAD
) {
497 if (copyin(ev
.sigev_value
.sival_ptr
, &tim_pnevp
,
498 sizeof (port_notify_t
)))
499 return (set_errno(EFAULT
));
501 #ifdef _SYSCALL32_IMPL
503 struct sigevent32 ev32
;
504 port_notify32_t tim_pnevp32
;
506 if (copyin(evp
, &ev32
, sizeof (struct oldsigevent32
)))
507 return (set_errno(EFAULT
));
508 ev
.sigev_notify
= ev32
.sigev_notify
;
509 ev
.sigev_signo
= ev32
.sigev_signo
;
511 * See comment in sigqueue32() on handling of 32-bit
512 * sigvals in a 64-bit kernel.
514 ev
.sigev_value
.sival_int
= ev32
.sigev_value
.sival_int
;
515 if (ev
.sigev_notify
== SIGEV_PORT
||
516 ev
.sigev_notify
== SIGEV_THREAD
) {
517 if (copyin((void *)(uintptr_t)
518 ev32
.sigev_value
.sival_ptr
,
519 (void *)&tim_pnevp32
,
520 sizeof (port_notify32_t
)))
521 return (set_errno(EFAULT
));
522 tim_pnevp
.portnfy_port
=
523 tim_pnevp32
.portnfy_port
;
524 tim_pnevp
.portnfy_user
=
525 (void *)(uintptr_t)tim_pnevp32
.portnfy_user
;
529 switch (ev
.sigev_notify
) {
533 if (ev
.sigev_signo
< 1 || ev
.sigev_signo
>= NSIG
)
534 return (set_errno(EINVAL
));
540 return (set_errno(EINVAL
));
544 * Use the clock's default sigevent (this is a structure copy).
546 ev
= backend
->clk_default
;
550 * We'll allocate our timer and sigqueue now, before we grab p_lock.
551 * If we can't find an empty slot, we'll free them before returning.
553 it
= kmem_cache_alloc(clock_timer_cache
, KM_SLEEP
);
554 bzero(it
, sizeof (itimer_t
));
555 mutex_init(&it
->it_mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
556 sigq
= kmem_zalloc(sizeof (sigqueue_t
), KM_SLEEP
);
558 mutex_enter(&p
->p_lock
);
561 * If this is this process' first timer, we need to attempt to allocate
562 * an array of timerstr_t pointers. We drop p_lock to perform the
563 * allocation; if we return to discover that p_itimer is non-NULL,
564 * we will free our allocation and drive on.
566 if ((itp
= p
->p_itimer
) == NULL
) {
567 mutex_exit(&p
->p_lock
);
568 itp
= kmem_zalloc(timer_max
* sizeof (itimer_t
*), KM_SLEEP
);
569 mutex_enter(&p
->p_lock
);
571 if (p
->p_itimer
== NULL
)
574 kmem_free(itp
, timer_max
* sizeof (itimer_t
*));
579 for (i
= 0; i
< timer_max
&& itp
[i
] != NULL
; i
++)
582 if (i
== timer_max
) {
584 * We couldn't find a slot. Drop p_lock, free the preallocated
585 * timer and sigqueue, and return an error.
587 mutex_exit(&p
->p_lock
);
588 kmem_cache_free(clock_timer_cache
, it
);
589 kmem_free(sigq
, sizeof (sigqueue_t
));
591 return (set_errno(EAGAIN
));
594 ASSERT(i
< timer_max
&& itp
[i
] == NULL
);
597 * If we develop other notification mechanisms, this will need
598 * to call into (yet another) backend.
600 sigq
->sq_info
.si_signo
= ev
.sigev_signo
;
602 sigq
->sq_info
.si_value
.sival_int
= i
;
604 sigq
->sq_info
.si_value
= ev
.sigev_value
;
605 sigq
->sq_info
.si_code
= SI_TIMER
;
606 sigq
->sq_info
.si_pid
= p
->p_pid
;
607 sigq
->sq_info
.si_ctid
= PRCTID(p
);
608 sigq
->sq_info
.si_zoneid
= getzoneid();
609 sigq
->sq_info
.si_uid
= crgetruid(cr
);
610 sigq
->sq_func
= timer_signal
;
611 sigq
->sq_next
= NULL
;
612 sigq
->sq_backptr
= it
;
614 it
->it_backend
= backend
;
615 it
->it_lock
= ITLK_LOCKED
;
619 if (ev
.sigev_notify
== SIGEV_THREAD
||
620 ev
.sigev_notify
== SIGEV_PORT
) {
624 * This timer is programmed to use event port notification when
626 * - allocate a port event structure and prepare it to be sent
627 * to the port as soon as the timer fires.
628 * - when the timer fires :
629 * - if event structure was already sent to the port then this
630 * is a timer fire overflow => increment overflow counter.
631 * - otherwise send pre-allocated event structure to the port.
632 * - the events field of the port_event_t structure counts the
633 * number of timer fired events.
634 * - The event structured is allocated using the
635 * PORT_ALLOC_CACHED flag.
636 * This flag indicates that the timer itself will manage and
637 * free the event structure when required.
640 it
->it_flags
|= IT_PORT
;
641 port
= tim_pnevp
.portnfy_port
;
643 /* associate timer as event source with the port */
644 error
= port_associate_ksource(port
, PORT_SOURCE_TIMER
,
645 (port_source_t
**)&it
->it_portsrc
, timer_close_port
,
648 itp
[i
] = NULL
; /* clear slot */
649 mutex_exit(&p
->p_lock
);
650 kmem_cache_free(clock_timer_cache
, it
);
651 kmem_free(sigq
, sizeof (sigqueue_t
));
652 return (set_errno(error
));
655 /* allocate an event structure/slot */
656 error
= port_alloc_event(port
, PORT_ALLOC_SCACHED
,
657 PORT_SOURCE_TIMER
, &pkevp
);
659 (void) port_dissociate_ksource(port
, PORT_SOURCE_TIMER
,
660 (port_source_t
*)it
->it_portsrc
);
661 itp
[i
] = NULL
; /* clear slot */
662 mutex_exit(&p
->p_lock
);
663 kmem_cache_free(clock_timer_cache
, it
);
664 kmem_free(sigq
, sizeof (sigqueue_t
));
665 return (set_errno(error
));
668 /* initialize event data */
669 port_init_event(pkevp
, i
, tim_pnevp
.portnfy_user
,
670 timer_port_callback
, it
);
671 it
->it_portev
= pkevp
;
672 it
->it_portfd
= port
;
674 if (ev
.sigev_notify
== SIGEV_SIGNAL
)
675 it
->it_flags
|= IT_SIGNAL
;
678 mutex_exit(&p
->p_lock
);
681 * Call on the backend to verify the event argument (or return
682 * EINVAL if this clock type does not support timers).
684 if ((error
= backend
->clk_timer_create(it
, timer_fire
)) != 0)
687 it
->it_lwp
= ttolwp(curthread
);
690 if (copyout(&i
, tid
, sizeof (timer_t
)) != 0) {
696 * If we're here, then we have successfully created the timer; we
697 * just need to release the timer and return.
699 timer_release(p
, it
);
705 * If we're here, an error has occurred late in the timer creation
706 * process. We need to regrab p_lock, and delete the incipient timer.
707 * Since we never unlocked the timer (it was born locked), it's
708 * impossible for a removal to be pending.
710 ASSERT(!(it
->it_lock
& ITLK_REMOVE
));
711 timer_delete_grabbed(p
, i
, it
);
713 return (set_errno(error
));
717 timer_gettime(timer_t tid
, itimerspec_t
*val
)
724 if ((it
= timer_grab(p
, tid
)) == NULL
)
725 return (set_errno(EINVAL
));
727 error
= it
->it_backend
->clk_timer_gettime(it
, &when
);
729 timer_release(p
, it
);
732 if (get_udatamodel() == DATAMODEL_NATIVE
) {
733 if (copyout(&when
, val
, sizeof (itimerspec_t
)))
736 if (ITIMERSPEC_OVERFLOW(&when
))
741 ITIMERSPEC_TO_ITIMERSPEC32(&w32
, &when
)
742 if (copyout(&w32
, val
, sizeof (itimerspec32_t
)))
748 return (error
? set_errno(error
) : 0);
752 timer_settime(timer_t tid
, int flags
, itimerspec_t
*val
, itimerspec_t
*oval
)
760 if ((error
= timer_gettime(tid
, oval
)) != 0)
764 if (get_udatamodel() == DATAMODEL_NATIVE
) {
765 if (copyin(val
, &when
, sizeof (itimerspec_t
)))
766 return (set_errno(EFAULT
));
770 if (copyin(val
, &w32
, sizeof (itimerspec32_t
)))
771 return (set_errno(EFAULT
));
773 ITIMERSPEC32_TO_ITIMERSPEC(&when
, &w32
);
776 if (itimerspecfix(&when
.it_value
) ||
777 (itimerspecfix(&when
.it_interval
) &&
778 timerspecisset(&when
.it_value
))) {
779 return (set_errno(EINVAL
));
782 if ((it
= timer_grab(p
, tid
)) == NULL
)
783 return (set_errno(EINVAL
));
785 error
= it
->it_backend
->clk_timer_settime(it
, flags
, &when
);
787 timer_release(p
, it
);
789 return (error
? set_errno(error
) : 0);
793 timer_delete(timer_t tid
)
798 if ((it
= timer_grab(p
, tid
)) == NULL
)
799 return (set_errno(EINVAL
));
801 timer_delete_grabbed(p
, tid
, it
);
807 timer_getoverrun(timer_t tid
)
813 if ((it
= timer_grab(p
, tid
)) == NULL
)
814 return (set_errno(EINVAL
));
817 * The it_overrun field is protected by p_lock; we need to acquire
818 * it before looking at the value.
820 mutex_enter(&p
->p_lock
);
821 overrun
= it
->it_overrun
;
822 mutex_exit(&p
->p_lock
);
824 timer_release(p
, it
);
830 * Entered/exited with p_lock held, but will repeatedly drop and regrab p_lock.
837 klwp_t
*lwp
= ttolwp(curthread
);
840 ASSERT(MUTEX_HELD(&p
->p_lock
));
842 if ((itp
= p
->p_itimer
) == NULL
)
845 for (i
= 0; i
< timer_max
; i
++) {
846 if ((it
= itp
[i
]) == NULL
)
851 if ((it
->it_lock
& ITLK_REMOVE
) || it
->it_lwp
!= lwp
) {
853 * This timer is either being removed or it isn't
854 * associated with this lwp.
861 * The LWP that created this timer is going away. To the user,
862 * our behavior here is explicitly undefined. We will simply
863 * null out the it_lwp field; if the LWP was bound to a CPU,
864 * the cyclic will stay bound to that CPU until the process
873 * Called to notify of an LWP binding change. Entered/exited with p_lock
874 * held, but will repeatedly drop and regrab p_lock.
881 klwp_t
*lwp
= ttolwp(curthread
);
884 ASSERT(MUTEX_HELD(&p
->p_lock
));
886 if ((itp
= p
->p_itimer
) == NULL
)
889 for (i
= 0; i
< timer_max
; i
++) {
890 if ((it
= itp
[i
]) == NULL
)
895 if (!(it
->it_lock
& ITLK_REMOVE
) && it
->it_lwp
== lwp
) {
897 * Drop p_lock and jump into the backend.
899 mutex_exit(&p
->p_lock
);
900 it
->it_backend
->clk_timer_lwpbind(it
);
901 mutex_enter(&p
->p_lock
);
909 * This function should only be called if p_itimer is non-NULL.
917 ASSERT(p
->p_itimer
!= NULL
);
919 for (i
= 0; i
< timer_max
; i
++)
920 (void) timer_delete(i
);
922 kmem_free(p
->p_itimer
, timer_max
* sizeof (itimer_t
*));
927 * timer_port_callback() is a callback function which is associated with the
928 * timer event and is activated just before the event is delivered to the user.
929 * The timer uses this function to update/set the overflow counter and
930 * to reenable the use of the event structure.
935 timer_port_callback(void *arg
, int *events
, pid_t pid
, int flag
, void *evp
)
939 mutex_enter(&it
->it_mutex
);
940 if (curproc
!= it
->it_proc
) {
941 /* can not deliver timer events to another proc */
942 mutex_exit(&it
->it_mutex
);
945 *events
= it
->it_pending
; /* 1 = 1 event, >1 # of overflows */
946 it
->it_pending
= 0; /* reinit overflow counter */
948 * This function can also be activated when the port is being closed
949 * and a timer event is already submitted to the port.
950 * In such a case the event port framework will use the
951 * close-callback function to notify the events sources.
952 * The timer close-callback function is timer_close_port() which
953 * will free all allocated resources (including the allocated
954 * port event structure).
955 * For that reason we don't need to check the value of flag here.
957 mutex_exit(&it
->it_mutex
);
962 * port is being closed ... free all allocated port event structures
963 * The delivered arg currently correspond to the first timer associated with
964 * the port and it is not useable in this case.
965 * We have to scan the list of activated timers in the current proc and
966 * compare them with the delivered port id.
971 timer_close_port(void *arg
, int port
, pid_t pid
, int lastclose
)
977 for (tid
= 0; tid
< timer_max
; tid
++) {
978 if ((it
= timer_grab(p
, tid
)) == NULL
)
981 mutex_enter(&it
->it_mutex
);
982 if (it
->it_portfd
== port
) {
984 pev
= (port_kevent_t
*)it
->it_portev
;
985 it
->it_portev
= NULL
;
986 it
->it_flags
&= ~IT_PORT
;
987 mutex_exit(&it
->it_mutex
);
988 (void) port_remove_done_event(pev
);
989 port_free_event(pev
);
991 mutex_exit(&it
->it_mutex
);
994 timer_release(p
, it
);