/*
 * kernel/posix-cpu-timers.c
 *
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
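
/*
 * Illustrative note (not part of the original file): the CPUCLOCK_*
 * helpers used throughout come from <linux/posix-timers.h>.  Roughly, a
 * CPU clock id packs a PID into its upper bits (CPUCLOCK_PID), a
 * per-thread flag (CPUCLOCK_PERTHREAD), and the clock type in the low
 * bits (CPUCLOCK_WHICH): CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED.
 * A pid of 0 means "the calling thread or process itself".
 */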
/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(cpu.sched);
	else
		cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return now.cpu < then.cpu;
	}
}

static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu += val.cpu;
	}
}

static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu -= b.cpu;
	}
	return a;
}
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;

			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (now.cpu < timer->it.cpu.expires.cpu)
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = now.cpu + incr - timer->it.cpu.expires.cpu;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr += incr;
		for (; i >= 0; incr = incr >> 1, i--) {
			if (delta < incr)
				continue;

			timer->it.cpu.expires.cpu += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	}
}
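
/*
 * Worked example (illustrative only, not part of the original file):
 * with expires = 10, incr = 3 and now = 20, delta starts at
 * 20 + 3 - 10 = 13.  The first loop doubles incr to 6 and then 12
 * (i == 2).  Walking back down, 12 fits once (expires becomes 22,
 * it_overrun += 1 << 2 == 4, delta becomes 1), while 6 and 3 no longer
 * fit.  The timer ends up with expires = 22 > now, having accounted for
 * the four expirations at 10, 13, 16 and 19 that have already passed.
 */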
static inline cputime_t prof_ticks(struct task_struct *p)
{
	return p->utime + p->stime;
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = task_sched_runtime(p);
		break;
	}
	return 0;
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (b->utime > a->utime)
		a->utime = b->utime;

	if (b->stime > a->stime)
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	if (!cputimer->running) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		cputimer->running = 1;
		update_gt_cputime(&cputimer->cputime, &sum);
	} else
		raw_spin_lock_irqsave(&cputimer->lock, flags);
	*times = cputimer->cputime;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		cpu->sched = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->sighand) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
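
/*
 * Illustrative userspace sketch (not part of the original file): the
 * pid == 0 path above is what ultimately services, e.g.,
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *
 * for the caller's own process clock, routed through the process-wide
 * k_clock registered in init_posix_cpu_timers() at the bottom of this
 * file.
 */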
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = utime + stime;

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.cpu < ptime) {
			timer->expires.cpu = 0;
		} else {
			timer->expires.cpu -= ptime;
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.cpu < utime) {
			timer->expires.cpu = 0;
		} else {
			timer->expires.cpu -= utime;
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
						sizeof(unsigned long long));
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);

}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;

	cleanup_timers(tsk->signal->cpu_timers,
		       tsk->utime + sig->utime, tsk->stime + sig->stime,
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		union cpu_time_count *exp = &nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp->cpu))
				cputime_expires->prof_exp = exp->cpu;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp->cpu))
				cputime_expires->virt_exp = exp->cpu;
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp->sched)
				cputime_expires->sched_exp = exp->sched;
			break;
		}
	}
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires.sched = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, old_incr, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->sighand.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->sighand == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		spin_unlock(&p->sighand->siglock);
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer);
	}

	spin_unlock(&p->sighand->siglock);
	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	}
	return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	unsigned long soft;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
			tsk->cputime_expires.prof_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
			tsk->cputime_expires.virt_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->cputime_expires.sched_exp = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
				"RT Watchdog Timeout: %s[%d]\n",
				tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}
static void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     cputime_t *expires, cputime_t cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires -= cputime_one_jiffy;
				it->error -= onecputick;
			}
		} else {
			it->expires = 0;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires)) {
		*expires = it->expires;
	}
}
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;
	maxfire = 20;
	prof_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || ptime < tl->expires.cpu) {
			prof_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || utime < tl->expires.cpu) {
			virt_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
			sched_expires = tl->expires.sched;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (!prof_expires || x < prof_expires) {
			prof_expires = x;
		}
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
		spin_lock(&p->sighand->siglock);
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		spin_lock(&p->sighand->siglock);
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	BUG_ON(!irqs_disabled());
	arm_timer(timer);
	spin_unlock(&p->sighand->siglock);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
					const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = tsk->utime,
			.stime = tsk->stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (sig->cputimer.running) {
		struct task_cputime group_sample;

		raw_spin_lock(&sig->cputimer.lock);
		group_sample = sig->cputimer.cputime;
		raw_spin_unlock(&sig->cputimer.lock);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	/*
	 * If there are any active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) cputimer must be running.
	 */
	if (tsk->signal->cputimer.running)
		check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}
/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting an itimer.  The *oldval is absolute and we
		 * update it to be relative; the *newval argument is relative
		 * and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now.cpu) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval -= now.cpu;
			}
		}

		if (!*newval)
			return;
		*newval += now.cpu;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit is earlier than the prof_exp cpu timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, it);
		spin_unlock_irq(&timer.it_lock);

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}
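
/*
 * Illustrative userspace sketch (not part of the original file): the
 * nanosleep path above is reached via clock_nanosleep() on a CPU-time
 * clock, e.g.
 *
 *	struct timespec req = { .tv_sec = 1, .tv_nsec = 0 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
 *
 * which sleeps until the process has consumed one more second of CPU
 * time rather than wall-clock time.  A sleep on the caller's own
 * per-thread clock is rejected with -EINVAL by the check at the top of
 * posix_cpu_nsleep(), since the thread cannot accumulate CPU time while
 * it is blocked.
 */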
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};
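
/*
 * Illustrative note (not part of the original file): clock_posix_cpu is
 * the k_clock that kernel/posix-timers.c selects for dynamic, pid-encoded
 * clock ids (negative clockid_t values such as those obtained from
 * clock_getcpuclockid() or pthread_getcpuclockid() in userspace), while
 * the two clocks registered below handle the fixed ids
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID.
 */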
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);