// SPDX-License-Identifier: GPL-2.0+
/*
 * 2002-10-15  Posix Clocks & timers
 *             by George Anzinger george@mvista.com
 *             Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *             Copyright (C) 2004 Boris Hu
 *
 * These are all the functions necessary to implement POSIX clocks & timers
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/time_namespace.h>

#include "timekeeping.h"
#include "posix-timers.h"
static struct kmem_cache *posix_timers_cache;
/*
 * Timers are managed in a hash table for lockless lookup. The hash key is
 * constructed from current::signal and the timer ID and the timer is
 * matched against current::signal and the timer ID when walking the hash
 * bucket list.
 *
 * This allows checkpoint/restore to reconstruct the exact timer IDs for
 * a process.
 */
static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);
static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;
/* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})
static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash, lockdep_is_held(&hash_lock)) {
		/* timer->it_signal can be set concurrently */
		if ((READ_ONCE(timer->it_signal) == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}
static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}
static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head;
	unsigned int cnt, id;

	/*
	 * FIXME: Replace this by a per signal struct xarray once there is
	 * a plan to handle the resulting CRIU regression gracefully.
	 */
	for (cnt = 0; cnt <= INT_MAX; cnt++) {
		spin_lock(&hash_lock);
		id = sig->next_posix_timer_id;

		/* Write the next ID back. Clamp it to the positive space */
		sig->next_posix_timer_id = (id + 1) & INT_MAX;

		head = &posix_timers_hashtable[hash(sig, id)];
		if (!__posix_timers_find(head, sig, id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			spin_unlock(&hash_lock);
			return id;
		}
		spin_unlock(&hash_lock);
	}
	/* POSIX return code when no timer ID could be allocated */
	return -EAGAIN;
}
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}

static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
{
	return ktime_get_real();
}

static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct __kernel_timex *t)
{
	return do_adjtimex(t);
}

static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}

static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
{
	return ktime_get();
}

static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_raw_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_coarse_real_ts64(tp);
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec64 *tp)
{
	ktime_get_coarse_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}
static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_boottime_ts64(tp);
	timens_add_boottime(tp);
	return 0;
}

static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
{
	return ktime_get_boottime();
}

static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_clocktai_ts64(tp);
	return 0;
}

static ktime_t posix_get_tai_ktime(clockid_t which_clock)
{
	return ktime_get_clocktai();
}

static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}
static __init int init_posix_timers(void)
{
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					       sizeof(struct k_itimer), 0,
					       SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}
__initcall(init_posix_timers);
/*
 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
 * are of type int. Clamp the overrun value to INT_MAX
 */
static inline int timer_overrun_to_int(struct k_itimer *timr)
{
	if (timr->it_overrun_last > (s64)INT_MAX)
		return INT_MAX;

	return (int)timr->it_overrun_last;
}
static void common_hrtimer_rearm(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
					    timr->it_interval);
	hrtimer_restart(timer);
}
static bool __posixtimer_deliver_signal(struct kernel_siginfo *info, struct k_itimer *timr)
{
	guard(spinlock)(&timr->it_lock);

	/*
	 * Check if the timer is still alive or whether it got modified
	 * since the signal was queued. In either case, don't rearm and
	 * drop the signal.
	 */
	if (timr->it_signal_seq != timr->it_sigqueue_seq || WARN_ON_ONCE(!timr->it_signal))
		return false;

	if (!timr->it_interval || WARN_ON_ONCE(timr->it_status != POSIX_TIMER_REQUEUE_PENDING))
		return true;

	timr->kclock->timer_rearm(timr);
	timr->it_status = POSIX_TIMER_ARMED;
	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1LL;
	++timr->it_signal_seq;
	info->si_overrun = timer_overrun_to_int(timr);
	return true;
}
/*
 * This function is called from the signal delivery code. It decides
 * whether the signal should be dropped and rearms interval timers. The
 * timer can be unconditionally accessed as there is a reference held on
 * it.
 */
bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq)
{
	struct k_itimer *timr = container_of(timer_sigq, struct k_itimer, sigq);
	bool ret;

	/*
	 * Release siglock to ensure proper locking order versus
	 * timr::it_lock. Keep interrupts disabled.
	 */
	spin_unlock(&current->sighand->siglock);

	ret = __posixtimer_deliver_signal(info, timr);

	/* Drop the reference which was acquired when the signal was queued */
	posixtimer_putref(timr);

	spin_lock(&current->sighand->siglock);
	return ret;
}
void posix_timer_queue_signal(struct k_itimer *timr)
{
	lockdep_assert_held(&timr->it_lock);

	timr->it_status = timr->it_interval ? POSIX_TIMER_REQUEUE_PENDING : POSIX_TIMER_DISARMED;
	posixtimer_send_sigqueue(timr);
}
/*
 * This function gets called when a POSIX.1b interval timer expires from
 * the HRTIMER interrupt (soft interrupt on RT kernels).
 *
 * Handles CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI
 * based timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr = container_of(timer, struct k_itimer, it.real.timer);

	guard(spinlock_irqsave)(&timr->it_lock);
	posix_timer_queue_signal(timr);
	return HRTIMER_NORESTART;
}
static struct pid *good_sigevent(sigevent_t *event)
{
	struct pid *pid = task_tgid(current);
	struct task_struct *rtn;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		pid = find_vpid(event->sigev_notify_thread_id);
		rtn = pid_task(pid, PIDTYPE_PID);
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		fallthrough;
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		fallthrough;
	case SIGEV_NONE:
		return pid;
	default:
		return NULL;
	}
}
static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);

	if (unlikely(!tmr))
		return NULL;

	if (unlikely(!posixtimer_init_sigqueue(&tmr->sigq))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	rcuref_init(&tmr->rcuref, 1);
	return tmr;
}
void posixtimer_free_timer(struct k_itimer *tmr)
{
	put_pid(tmr->it_pid);
	if (tmr->sigq.ucounts)
		dec_rlimit_put_ucounts(tmr->sigq.ucounts, UCOUNT_RLIMIT_SIGPENDING);
	kfree_rcu(tmr, rcu);
}
static void posix_timer_unhash_and_free(struct k_itimer *tmr)
{
	spin_lock(&hash_lock);
	hlist_del_rcu(&tmr->t_hash);
	spin_unlock(&hash_lock);
	posixtimer_putref(tmr);
}
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
/* Create a POSIX.1b interval timer. */
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
			   timer_t __user *created_timer_id)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);

	/*
	 * Add the timer to the hash table. The timer is not yet valid
	 * because new_timer::it_signal is still NULL. The timer id is also
	 * not yet visible to user space.
	 */
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		posixtimer_free_timer(new_timer);
		return new_timer_id;
	}

	new_timer->it_id = (timer_t)new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->kclock = kc;
	new_timer->it_overrun = -1LL;
	if (event) {
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
		new_timer->it_sigev_notify = event->sigev_notify;
		new_timer->sigq.info.si_signo = event->sigev_signo;
		new_timer->sigq.info.si_value = event->sigev_value;
	} else {
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->sigq.info.si_signo = SIGALRM;
		memset(&new_timer->sigq.info.si_value, 0, sizeof(sigval_t));
		new_timer->sigq.info.si_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}
	if (new_timer->it_sigev_notify & SIGEV_THREAD_ID)
		new_timer->it_pid_type = PIDTYPE_PID;
	else
		new_timer->it_pid_type = PIDTYPE_TGID;

	new_timer->sigq.info.si_tid = new_timer->it_id;
	new_timer->sigq.info.si_code = SI_TIMER;

	if (copy_to_user(created_timer_id, &new_timer_id, sizeof(new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * After successful copy out, the timer ID is visible to user space
	 * now but not yet valid because new_timer::signal is still NULL.
	 *
	 * Complete the initialization with the clock specific create
	 * callback.
	 */
	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	/* This makes the timer valid in the hash table */
	WRITE_ONCE(new_timer->it_signal, current->signal);
	hlist_add_head(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);
	/*
	 * After unlocking sighand::siglock @new_timer is subject to
	 * concurrent removal and cannot be touched anymore
	 */
	return 0;
out:
	posix_timer_unhash_and_free(new_timer);
	return error;
}
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (copy_from_user(&event, timer_event_spec, sizeof(event)))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}
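/*
 * Illustrative user-space counterpart (a sketch, not part of this file):
 * how the syscall above is typically reached via librt. The signal and
 * payload choices below are arbitrary.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify          = SIGEV_SIGNAL,
 *		.sigev_signo           = SIGRTMIN,
 *		.sigev_value.sival_int = 42,
 *	};
 *
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
 *		perror("timer_create");
 */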
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
		       struct compat_sigevent __user *, timer_event_spec,
		       timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (get_compat_sigevent(&event, timer_event_spec))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}
#endif
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	/*
	 * The hash lookup and the timers are RCU protected.
	 *
	 * Timers are added to the hash in invalid state where
	 * timr::it_signal == NULL. timer::it_signal is only set after the
	 * rest of the initialization succeeded.
	 *
	 * Timer destruction happens in steps:
	 *  1) Set timr::it_signal to NULL with timr::it_lock held
	 *  2) Release timr::it_lock
	 *  3) Remove from the hash under hash_lock
	 *  4) Put the reference count.
	 *
	 * The reference count might not drop to zero if timr::sigq is
	 * queued. In that case the signal delivery or flush will put the
	 * last reference count.
	 *
	 * When the reference count reaches zero, the timer is scheduled
	 * for RCU removal after the grace period.
	 *
	 * Holding rcu_read_lock() across the lookup ensures that
	 * the timer cannot be freed.
	 *
	 * The lookup validates locklessly that timr::it_signal ==
	 * current::signal and timr::it_id == @timer_id. timr::it_id
	 * can't change, but timr::it_signal becomes NULL during
	 * destruction.
	 */
	guard(rcu)();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		/*
		 * Validate under timr::it_lock that timr::it_signal is
		 * still valid. Pairs with #1 above.
		 */
		if (timr->it_signal == current->signal)
			return timr;
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	return NULL;
}
static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return __hrtimer_expires_remaining_adjusted(timer, now);
}
static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return hrtimer_forward(timer, now, timr->it_interval);
}
/*
 * Get the time remaining on a POSIX.1b interval timer.
 *
 * Two issues to handle here:
 *
 *  1) The timer has a requeue pending. The return value must appear as
 *     if the timer has been requeued right now.
 *
 *  2) The timer is a SIGEV_NONE timer. These timers are never enqueued
 *     into the hrtimer queue and therefore never expired. Emulate expiry
 *     here taking #1 into account.
 */
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
	const struct k_clock *kc = timr->kclock;
	ktime_t now, remaining, iv;
	bool sig_none;

	sig_none = timr->it_sigev_notify == SIGEV_NONE;
	iv = timr->it_interval;

	/* interval timer ? */
	if (iv) {
		cur_setting->it_interval = ktime_to_timespec64(iv);
	} else if (timr->it_status == POSIX_TIMER_DISARMED) {
		/*
		 * SIGEV_NONE oneshot timers are never queued and therefore
		 * timr->it_status is always DISARMED. The check below
		 * vs. remaining time will handle this case.
		 *
		 * For all other timers there is nothing to update here, so
		 * return.
		 */
		if (!sig_none)
			return;
	}

	now = kc->clock_get_ktime(timr->it_clock);

	/*
	 * If this is an interval timer and either has requeue pending or
	 * is a SIGEV_NONE timer move the expiry time forward by intervals,
	 * so expiry is > now.
	 */
	if (iv && timr->it_status != POSIX_TIMER_ARMED)
		timr->it_overrun += kc->timer_forward(timr, now);

	remaining = kc->timer_remaining(timr, now);

	/*
	 * As @now is retrieved before a possible timer_forward() and
	 * cannot be reevaluated by the compiler @remaining is based on the
	 * same @now value. Therefore @remaining is consistent vs. @now.
	 *
	 * Consequently all interval timers, i.e. @iv > 0, cannot have a
	 * remaining time <= 0 because timer_forward() guarantees to move
	 * them forward so that the next timer expiry is > @now.
	 */
	if (remaining <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when it is
		 * expired! Timers which have a real signal delivery mode
		 * must return a remaining time greater than 0 because the
		 * signal has not yet been delivered.
		 */
		if (!sig_none)
			cur_setting->it_value.tv_nsec = 1;
	} else {
		cur_setting->it_value = ktime_to_timespec64(remaining);
	}
}
static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
	const struct k_clock *kc;
	struct k_itimer *timr;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	memset(setting, 0, sizeof(*setting));
	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, setting);

	unlock_timer(timr, flags);
	return ret;
}
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct __kernel_itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_itimerspec64(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}
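/*
 * Illustrative user-space sketch (not part of this file): reading back
 * the remaining time via the syscall above. For a SIGEV_NONE timer an
 * it_value of all zeroes is the only way to observe that it expired,
 * which is what the emulation in common_timer_get() provides.
 *
 *	struct itimerspec cur;
 *
 *	if (!timer_gettime(tid, &cur))
 *		printf("%lld.%09ld s remaining\n",
 *		       (long long)cur.it_value.tv_sec, cur.it_value.tv_nsec);
 */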
#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
		struct old_itimerspec32 __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_old_itimerspec32(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}

#endif
/**
 * sys_timer_getoverrun - Get the number of overruns of a POSIX.1b interval timer
 * @timer_id:	The timer ID which identifies the timer
 *
 * The "overrun count" of a timer is one plus the number of expiration
 * intervals which have elapsed between the first expiry, which queues the
 * signal and the actual signal delivery. On signal delivery the "overrun
 * count" is calculated and cached, so it can be returned directly here.
 *
 * As this is relative to the last queued signal the returned overrun count
 * is meaningless outside of the signal delivery path and even there it
 * does not accurately reflect the current state when user space evaluates
 * it.
 *
 * Returns:
 *	-EINVAL		@timer_id is invalid
 *	1..INT_MAX	The number of overruns related to the last delivered signal
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	unsigned long flags;
	int overrun;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timer_overrun_to_int(timr);
	unlock_timer(timr, flags);

	return overrun;
}
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	/*
	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they become CLOCK_MONOTONIC based under the
	 * hood. See hrtimer_init(). Update timr->kclock, so the generic
	 * functions which use timr->kclock->clock_get_*() work.
	 *
	 * Note: it_clock stays unmodified, because the next timer_set() might
	 * use ABSTIME, so it needs to switch back.
	 */
	if (timr->it_clock == CLOCK_REALTIME)
		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	if (!absolute)
		expires = ktime_add_safe(expires, timer->base->get_time());
	hrtimer_set_expires(timer, expires);

	if (!sigev_none)
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
	return hrtimer_try_to_cancel(&timr->it.real.timer);
}
static void common_timer_wait_running(struct k_itimer *timer)
{
	hrtimer_cancel_wait_running(&timer->it.real.timer);
}
/*
 * On PREEMPT_RT this prevents priority inversion and a potential livelock
 * against the ksoftirqd thread in case that ksoftirqd gets preempted while
 * executing a hrtimer callback.
 *
 * See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this
 * just results in a cpu_relax().
 *
 * For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is
 * just a cpu_relax(). With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this
 * prevents spinning on an eventually scheduled out task and a livelock
 * when the task which tries to delete or disarm the timer has preempted
 * the task which runs the expiry in task work context.
 */
static struct k_itimer *timer_wait_running(struct k_itimer *timer,
					   unsigned long *flags)
{
	const struct k_clock *kc = READ_ONCE(timer->kclock);
	timer_t timer_id = READ_ONCE(timer->it_id);

	/* Prevent kfree(timer) after dropping the lock */
	rcu_read_lock();
	unlock_timer(timer, *flags);

	/*
	 * kc->timer_wait_running() might drop RCU lock. So @timer
	 * cannot be touched anymore after the function returns!
	 */
	if (!WARN_ON_ONCE(!kc->timer_wait_running))
		kc->timer_wait_running(timer);

	rcu_read_unlock();

	/* Relock the timer. It might no longer be hashed. */
	return lock_timer(timer_id, flags);
}
/*
 * Set up the new interval and reset the signal delivery data
 */
void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting)
{
	if (new_setting->it_value.tv_sec || new_setting->it_value.tv_nsec)
		timer->it_interval = timespec64_to_ktime(new_setting->it_interval);
	else
		timer->it_interval = 0;

	/* Reset overrun accounting */
	timer->it_overrun_last = 0;
	timer->it_overrun = -1LL;
}
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
		     struct itimerspec64 *new_setting,
		     struct itimerspec64 *old_setting)
{
	const struct k_clock *kc = timr->kclock;
	bool sigev_none;
	ktime_t expires;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/*
	 * Careful here. On SMP systems the timer expiry function could be
	 * active and spinning on timr->it_lock.
	 */
	if (kc->timer_try_to_cancel(timr) < 0)
		return TIMER_RETRY;

	timr->it_status = POSIX_TIMER_DISARMED;
	posix_timer_set_common(timr, new_setting);

	/* Keep timer disarmed when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	expires = timespec64_to_ktime(new_setting->it_value);
	if (flags & TIMER_ABSTIME)
		expires = timens_ktime_to_host(timr->it_clock, expires);
	sigev_none = timr->it_sigev_notify == SIGEV_NONE;

	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
	timr->it_status = POSIX_TIMER_ARMED;
	return 0;
}
static int do_timer_settime(timer_t timer_id, int tmr_flags,
			    struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
{
	const struct k_clock *kc;
	struct k_itimer *timr;
	unsigned long flags;
	int error;

	if (!timespec64_valid(&new_spec64->it_interval) ||
	    !timespec64_valid(&new_spec64->it_value))
		return -EINVAL;

	if (old_spec64)
		memset(old_spec64, 0, sizeof(*old_spec64));

	timr = lock_timer(timer_id, &flags);
retry:
	if (!timr)
		return -EINVAL;

	if (old_spec64)
		old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);

	/* Prevent signal delivery and rearming. */
	timr->it_signal_seq++;

	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);

	if (error == TIMER_RETRY) {
		// We already got the old time...
		old_spec64 = NULL;
		/* Unlocks and relocks the timer if it still exists */
		timr = timer_wait_running(timr, &flags);
		goto retry;
	}
	unlock_timer(timr, flags);

	return error;
}
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct __kernel_itimerspec __user *, new_setting,
		struct __kernel_itimerspec __user *, old_setting)
{
	struct itimerspec64 new_spec, old_spec, *rtn;
	int error = 0;

	if (!new_setting)
		return -EINVAL;

	if (get_itimerspec64(&new_spec, new_setting))
		return -EFAULT;

	rtn = old_setting ? &old_spec : NULL;
	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old_setting) {
		if (put_itimerspec64(&old_spec, old_setting))
			error = -EFAULT;
	}
	return error;
}
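/*
 * Illustrative user-space sketch (not part of this file): arming the
 * timer to fire after one second and every 100ms thereafter. A zeroed
 * it_value instead would keep it disarmed, as common_timer_set() shows.
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
 *	};
 *
 *	if (timer_settime(tid, 0, &its, NULL))
 *		perror("timer_settime");
 */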
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
		struct old_itimerspec32 __user *, new,
		struct old_itimerspec32 __user *, old)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old ? &old_spec : NULL;
	int error = 0;

	if (!new)
		return -EINVAL;
	if (get_old_itimerspec32(&new_spec, new))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old) {
		if (put_old_itimerspec32(&old_spec, old))
			error = -EFAULT;
	}
	return error;
}
#endif
int common_timer_del(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	if (kc->timer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;
	timer->it_status = POSIX_TIMER_DISARMED;
	return 0;
}
/*
 * If the deleted timer is on the ignored list, remove it and
 * drop the associated reference.
 */
static inline void posix_timer_cleanup_ignored(struct k_itimer *tmr)
{
	if (!hlist_unhashed(&tmr->ignored_list)) {
		hlist_del_init(&tmr->ignored_list);
		posixtimer_putref(tmr);
	}
}
static inline int timer_delete_hook(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	/* Prevent signal delivery and rearming. */
	timer->it_signal_seq++;

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

	timer = lock_timer(timer_id, &flags);

retry_delete:
	if (!timer)
		return -EINVAL;

	if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
		/* Unlocks and relocks the timer if it still exists */
		timer = timer_wait_running(timer, &flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	hlist_del(&timer->list);
	posix_timer_cleanup_ignored(timer);
	/*
	 * A concurrent lookup could check timer::it_signal lockless. It
	 * will reevaluate with timer::it_lock held and observe the NULL.
	 *
	 * It must be written with siglock held so that the signal code
	 * observes timer->it_signal == NULL in do_sigaction(SIG_IGN),
	 * which prevents it from moving a pending signal of a deleted
	 * timer to the ignore list.
	 */
	WRITE_ONCE(timer->it_signal, NULL);
	spin_unlock(&current->sighand->siglock);

	unlock_timer(timer, flags);
	posix_timer_unhash_and_free(timer);
	return 0;
}
/*
 * Delete a timer if it is armed, remove it from the hash and schedule it
 * for RCU freeing.
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

	/*
	 * irqsave is required to make timer_wait_running() work.
	 */
	spin_lock_irqsave(&timer->it_lock, flags);

retry_delete:
	/*
	 * Even if the timer is no longer accessible from other tasks
	 * it still might be armed and queued in the underlying timer
	 * mechanism. Worse, that timer mechanism might run the expiry
	 * function concurrently.
	 */
	if (timer_delete_hook(timer) == TIMER_RETRY) {
		/*
		 * Timer is expired concurrently, prevent livelocks
		 * and pointless spinning on RT.
		 *
		 * timer_wait_running() drops timer::it_lock, which opens
		 * the possibility for another task to delete the timer.
		 *
		 * That's not possible here because this is invoked from
		 * do_exit() only for the last thread of the thread group.
		 * So no other task can access and delete that timer.
		 */
		if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
			return;

		goto retry_delete;
	}
	hlist_del(&timer->list);

	posix_timer_cleanup_ignored(timer);

	/*
	 * Setting timer::it_signal to NULL is technically not required
	 * here as nothing can access the timer anymore legitimately via
	 * the hash table. Set it to NULL nevertheless so that all deletion
	 * paths are consistent.
	 */
	WRITE_ONCE(timer->it_signal, NULL);

	spin_unlock_irqrestore(&timer->it_lock, flags);
	posix_timer_unhash_and_free(timer);
}
/*
 * Invoked from do_exit() when the last thread of a thread group exits.
 * At that point no other task can access the timers of the dying
 * task anymore.
 */
void exit_itimers(struct task_struct *tsk)
{
	struct hlist_head timers;

	if (hlist_empty(&tsk->signal->posix_timers))
		return;

	/* Protect against concurrent read via /proc/$PID/timers */
	spin_lock_irq(&tsk->sighand->siglock);
	hlist_move_list(&tsk->signal->posix_timers, &timers);
	spin_unlock_irq(&tsk->sighand->siglock);

	/* The timers are no longer accessible via tsk::signal */
	while (!hlist_empty(&timers))
		itimer_delete(hlist_entry(timers.first, struct k_itimer, list));

	/*
	 * There should be no timers on the ignored list. itimer_delete() has
	 * mopped them up.
	 */
	if (!WARN_ON_ONCE(!hlist_empty(&tsk->signal->ignored_posix_timers)))
		return;

	hlist_move_list(&tsk->signal->ignored_posix_timers, &timers);
	while (!hlist_empty(&timers)) {
		posix_timer_cleanup_ignored(hlist_entry(timers.first, struct k_itimer,
							ignored_list));
	}
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_timespec64(&new_tp, tp))
		return -EFAULT;

	/*
	 * Permission checks have to be done inside the clock specific
	 * setter callback.
	 */
	return kc->clock_set(which_clock, &new_tp);
}
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get_timespec(which_clock, &kernel_tp);

	if (!error && put_timespec64(&kernel_tp, tp))
		error = -EFAULT;

	return error;
}
int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *ktx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	return kc->clock_adj(which_clock, ktx);
}
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct __kernel_timex __user *, utx)
{
	struct __kernel_timex ktx;
	int err;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = do_clock_adjtime(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
/**
 * sys_clock_getres - Get the resolution of a clock
 * @which_clock:	The clock to get the resolution for
 * @tp:			Pointer to a user space timespec64 for storage
 *
 * POSIX defines:
 *
 * "The clock_getres() function shall return the resolution of any
 * clock. Clock resolutions are implementation-defined and cannot be set by
 * a process. If the argument res is not NULL, the resolution of the
 * specified clock shall be stored in the location pointed to by res. If
 * res is NULL, the clock resolution is not returned. If the time argument
 * of clock_settime() is not a multiple of res, then the value is truncated
 * to a multiple of res."
 *
 * Due to the various hardware constraints the real resolution can vary
 * wildly and even change during runtime when the underlying devices are
 * replaced. The kernel also can use hardware devices with different
 * resolutions for reading the time and for arming timers.
 *
 * The kernel therefore deviates from the POSIX spec in various aspects:
 *
 * 1) The resolution returned to user space
 *
 *    For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI,
 *    CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW
 *    the kernel differentiates only two cases:
 *
 *    I)  Low resolution mode:
 *
 *	  When high resolution timers are disabled at compile or runtime
 *	  the resolution returned is nanoseconds per tick, which represents
 *	  the precision at which timers expire.
 *
 *    II) High resolution mode:
 *
 *	  When high resolution timers are enabled the resolution returned
 *	  is always one nanosecond independent of the actual resolution of
 *	  the underlying hardware devices.
 *
 *	  For CLOCK_*_ALARM the actual resolution depends on system
 *	  state. When system is running the resolution is the same as the
 *	  resolution of the other clocks. During suspend the actual
 *	  resolution is the resolution of the underlying RTC device which
 *	  might be way less precise than the clockevent device used during
 *	  running state.
 *
 *    For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution
 *    returned is always nanoseconds per tick.
 *
 *    For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution
 *    returned is always one nanosecond under the assumption that the
 *    underlying scheduler clock has a better resolution than nanoseconds
 *    per tick.
 *
 *    For dynamic POSIX clocks (PTP devices) the resolution returned is
 *    always one nanosecond.
 *
 * 2) Effect on sys_clock_settime()
 *
 *    The kernel does not truncate the time which is handed in to
 *    sys_clock_settime(). The kernel internal timekeeping is always using
 *    nanoseconds precision independent of the clocksource device which is
 *    used to read the time from. The resolution of that device only
 *    affects the precision of the time returned by sys_clock_gettime().
 *
 * Returns:
 *	0		Success. @tp contains the resolution
 *	-EINVAL		@which_clock is not a valid clock ID
 *	-EFAULT		Copying the resolution to @tp faulted
 *	-ENODEV		Dynamic POSIX clock is not backed by a device
 *	-EOPNOTSUPP	Dynamic POSIX clock does not support getres()
 */
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && put_timespec64(&rtn_tp, tp))
		error = -EFAULT;

	return error;
}
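/*
 * Illustrative user-space sketch (not part of this file): on a kernel
 * with high resolution timers enabled this reports 1ns for
 * CLOCK_MONOTONIC; with them disabled it reports nanoseconds per tick,
 * as the comment above spells out.
 *
 *	struct timespec res;
 *
 *	if (!clock_getres(CLOCK_MONOTONIC, &res))
 *		printf("%lld.%09ld\n", (long long)res.tv_sec, res.tv_nsec);
 */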
#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_old_timespec32(&ts, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &ts);
}

SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_get_timespec(which_clock, &ts);

	if (!err && put_old_timespec32(&ts, tp))
		err = -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
		struct old_timex32 __user *, utp)
{
	struct __kernel_timex ktx;
	int err;

	err = get_old_timex32(&ktx, utp);
	if (err)
		return err;

	err = do_clock_adjtime(which_clock, &ktx);

	if (err >= 0 && put_old_timex32(utp, &ktx))
		return -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_getres(which_clock, &ts);
	if (!err && tp && put_old_timespec32(&ts, tp))
		return -EFAULT;

	return err;
}

#endif
/*
 * sys_clock_nanosleep() for CLOCK_REALTIME and CLOCK_TAI
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 const struct timespec64 *rqtp)
{
	ktime_t texp = timespec64_to_ktime(*rqtp);

	return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
/*
 * sys_clock_nanosleep() for CLOCK_MONOTONIC and CLOCK_BOOTTIME
 *
 * Absolute nanosleeps for these clocks are time-namespace adjusted.
 */
static int common_nsleep_timens(const clockid_t which_clock, int flags,
				const struct timespec64 *rqtp)
{
	ktime_t texp = timespec64_to_ktime(*rqtp);

	if (flags & TIMER_ABSTIME)
		texp = timens_ktime_to_host(which_clock, texp);

	return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
		struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_old_timespec32(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}

#endif
static const struct k_clock clock_realtime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_realtime_timespec,
	.clock_get_ktime	= posix_get_realtime_ktime,
	.clock_set		= posix_clock_realtime_set,
	.clock_adj		= posix_clock_realtime_adj,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_monotonic = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_timespec,
	.clock_get_ktime	= posix_get_monotonic_ktime,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_monotonic_raw = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_raw,
};

static const struct k_clock clock_realtime_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_realtime_coarse,
};

static const struct k_clock clock_monotonic_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_monotonic_coarse,
};

static const struct k_clock clock_tai = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_tai_ktime,
	.clock_get_timespec	= posix_get_tai_timespec,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_boottime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_boottime_ktime,
	.clock_get_timespec	= posix_get_boottime_timespec,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_boottime,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
};
static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	clockid_t idx = id;

	if (id < 0) {
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;
	}

	if (id >= ARRAY_SIZE(posix_clocks))
		return NULL;

	return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
}