MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz: kernel/timer.c
/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/cpu.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
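/*
 * A worked example of the ranges these constants give: a timer whose
 * (expires - timer_jiffies) delta is
 *
 *	       0 ... 2^8  - 1  (255)        lands in tv1
 *	     2^8 ... 2^14 - 1  (16383)      lands in tv2
 *	    2^14 ... 2^20 - 1  (1048575)    lands in tv3
 *	    2^20 ... 2^26 - 1  (67108863)   lands in tv4
 *	    2^26 ... 2^32 - 1               lands in tv5
 *
 * (see internal_add_timer() below).  At HZ=100, for instance, tv1 alone
 * covers the next 2.56 seconds, so most kernel timers expire or are
 * deleted without ever being cascaded down a level.
 */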
typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	unsigned long timer_jiffies;
	struct timer_list *running_timer;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

/* Fake initialization */
static DEFINE_PER_CPU(tvec_base_t, tvec_bases) = { SPIN_LOCK_UNLOCKED };

static void check_timer_failed(struct timer_list *timer)
{
	static int whine_count;
	if (whine_count < 16) {
		whine_count++;
		printk("Uninitialised timer!\n");
		printk("This is just a warning.  Your computer is OK\n");
		printk("function=0x%p, data=0x%lx\n",
			timer->function, timer->data);
		dump_stack();
	}
	/*
	 * Now fix it up
	 */
	spin_lock_init(&timer->lock);
	timer->magic = TIMER_MAGIC;
}

static inline void check_timer(struct timer_list *timer)
{
	if (timer->magic != TIMER_MAGIC)
		check_timer_failed(timer);
}


static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *old_base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	check_timer(timer);

	spin_lock_irqsave(&timer->lock, flags);
	new_base = &__get_cpu_var(tvec_bases);
repeat:
	old_base = timer->base;

	/*
	 * Prevent deadlocks via ordering by old_base < new_base.
	 */
	if (old_base && (new_base != old_base)) {
		if (old_base < new_base) {
			spin_lock(&new_base->lock);
			spin_lock(&old_base->lock);
		} else {
			spin_lock(&old_base->lock);
			spin_lock(&new_base->lock);
		}
		/*
		 * The timer base might have been cancelled while we were
		 * trying to take the lock(s):
		 */
		if (timer->base != old_base) {
			spin_unlock(&new_base->lock);
			spin_unlock(&old_base->lock);
			goto repeat;
		}
	} else {
		spin_lock(&new_base->lock);
		if (timer->base != old_base) {
			spin_unlock(&new_base->lock);
			goto repeat;
		}
	}

	/*
	 * Delete the previous timeout (if there was any), and install
	 * the new one:
	 */
	if (old_base) {
		list_del(&timer->entry);
		ret = 1;
	}
	timer->expires = expires;
	internal_add_timer(new_base, timer);
	timer->base = new_base;

	if (old_base && (new_base != old_base))
		spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	spin_unlock_irqrestore(&timer->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);
/***
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = &per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	check_timer(timer);

	spin_lock_irqsave(&base->lock, flags);
	internal_add_timer(base, timer);
	timer->base = base;
	spin_unlock_irqrestore(&base->lock, flags);
}

EXPORT_SYMBOL(add_timer_on);

/***
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	check_timer(timer);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
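/*
 * Illustrative usage sketch (not part of the original file; all names
 * below are hypothetical): a periodic timer under this 2.6 API is
 * typically set up once and then re-armed from its own handler:
 *
 *	static struct timer_list my_timer;
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		// ... periodic work ...
 *		mod_timer(&my_timer, jiffies + HZ);	// re-arm, 1s later
 *	}
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_timer_fn;
 *	my_timer.data = 0;
 *	mod_timer(&my_timer, jiffies + HZ);
 *
 * Since mod_timer() activates an inactive timer, the initial arming
 * needs no separate add_timer() call.
 */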
/***
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	unsigned long flags;
	tvec_base_t *base;

	check_timer(timer);

repeat:
	base = timer->base;
	if (!base)
		return 0;
	spin_lock_irqsave(&base->lock, flags);
	if (base != timer->base) {
		spin_unlock_irqrestore(&base->lock, flags);
		goto repeat;
	}
	list_del(&timer->entry);
	timer->base = NULL;
	spin_unlock_irqrestore(&base->lock, flags);

	return 1;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/***
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. Upon exit the timer is not queued and
 * the handler is not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 *
 * del_timer_sync() is slow and complicated because it copes with timer
 * handlers which re-arm the timer (periodic timers). If the timer handler
 * is known to not do this (a single shot timer) then use
 * del_singleshot_timer_sync() instead.
 */
int del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	int i, ret = 0;

	check_timer(timer);

del_again:
	ret += del_timer(timer);

	for_each_online_cpu(i) {
		base = &per_cpu(tvec_bases, i);
		if (base->running_timer == timer) {
			while (base->running_timer == timer) {
				cpu_relax();
				preempt_check_resched();
			}
			break;
		}
	}
	smp_rmb();
	if (timer_pending(timer))
		goto del_again;

	return ret;
}

EXPORT_SYMBOL(del_timer_sync);
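/*
 * Shutdown sketch (illustrative only; the 'stopping' flag is
 * hypothetical): because del_timer_sync() cannot stop a handler that
 * keeps re-arming itself, periodic-timer teardown usually pairs it
 * with a flag the handler checks before calling mod_timer() again:
 *
 *	stopping = 1;			// handler re-arms only if !stopping
 *	smp_wmb();
 *	del_timer_sync(&my_timer);	// also waits out a running handler
 *
 * This is one way to satisfy the "callers must prevent restarting of
 * the timer" rule from the comment above.
 */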
/***
 * del_singleshot_timer_sync - deactivate a non-recursive timer
 * @timer: the timer to be deactivated
 *
 * This function is an optimization of del_timer_sync for the case where the
 * caller can guarantee the timer does not reschedule itself in its timer
 * function.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. Upon exit the timer is not queued and
 * the handler is not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_singleshot_timer_sync(struct timer_list *timer)
{
	int ret = del_timer(timer);

	if (!ret) {
		ret = del_timer_sync(timer);
		BUG_ON(ret);
	}

	return ret;
}
EXPORT_SYMBOL(del_singleshot_timer_sync);
#endif

static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct list_head *head, *curr;

	head = tv->vec + index;
	curr = head->next;
	/*
	 * We are removing _all_ timers from the list, so we don't have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (curr != head) {
		struct timer_list *tmp;

		tmp = list_entry(curr, struct timer_list, entry);
		BUG_ON(tmp->base != base);
		curr = curr->next;
		internal_add_timer(base, tmp);
	}
	INIT_LIST_HEAD(head);

	return index;
}
/***
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list = LIST_HEAD_INIT(work_list);
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_splice_init(base->tv1.vec + index, &work_list);
repeat:
		if (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next,struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			list_del(&timer->entry);
			set_running_timer(base, timer);
			smp_wmb();
			timer->base = NULL;
			spin_unlock_irq(&base->lock);
			fn(data);
			spin_lock_irq(&base->lock);
			goto repeat;
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	tvec_t *varray[4];
	int i, j;

	base = &__get_cpu_var(tvec_bases);
	spin_lock(&base->lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->lock);
	return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */

/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
long time_phase;			/* phase offset (scaled us)	*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
long time_adj;				/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/
long time_adjust;
long time_next_adjust;
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The microtime() routine or
	 * external clock driver will ensure that reported time
	 * is always monotonic. The ugly divides should be
	 * replaced.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			wall_to_monotonic.tv_sec++;
			/* The timer interpolator will make time change gradually instead
			 * of an immediate jump by one second.
			 */
			time_interpolator_update(-NSEC_PER_SEC);
			time_state = TIME_OOP;
			clock_was_set();
			printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			wall_to_monotonic.tv_sec--;
			/* Use of time interpolator for a gradual change of time */
			time_interpolator_update(NSEC_PER_SEC);
			time_state = TIME_WAIT;
			clock_was_set();
			printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second. In
	 * PLL mode, the offset is reduced by a fixed factor
	 * times the time constant. In FLL mode the offset is
	 * used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread
	 * the adjustment over not more than the number of
	 * seconds between updates.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	}

	/*
	 * Compute the frequency estimate and additional phase
	 * adjustment due to frequency error for the next
	 * second. When the PPS signal is engaged, gnaw on the
	 * watchdog counter and update the frequency computed by
	 * the pll and the PPS signal.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {	/* PPS signal lost */
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* Compensate for (HZ==100) != (1 << SHIFT_HZ).
	 * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
	 */
	if (time_adj < 0)
		time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
	else
		time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
#if HZ == 1000
	/* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
	 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
	 */
	if (time_adj < 0)
		time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
	else
		time_adj += (time_adj >> 6) + (time_adj >> 7);
#endif
}
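/*
 * The HZ compensation above, written out: time_adj was computed with
 * SHIFT_HZ (2^7 = 128 when HZ == 100), so it must be scaled by the real
 * ratio 128/100 = 1.28.  The shifts approximate that as
 *
 *	time_adj + (time_adj >> 2) + (time_adj >> 5)
 *		= time_adj * (1 + 0.25 + 0.03125) = time_adj * 1.28125,
 *
 * i.e. 128.125 instead of 128, the "only 0.125% error" in the comment.
 * The HZ == 1000 branch approximates 1024/1000 the same way:
 * 1 + 1/64 + 1/128 = 1.0234375 (1023.4375 versus 1024).
 */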
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	long time_adjust_step, delta_nsec;

	if ((time_adjust_step = time_adjust) != 0) {
		/* We are doing an adjtime thing.
		 *
		 * Prepare time_adjust_step to be within bounds.
		 * Note that a positive time_adjust means we want the clock
		 * to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;

		/* Reduce by this step the amount of time left  */
		time_adjust -= time_adjust_step;
	}
	delta_nsec = tick_nsec + time_adjust_step * 1000;
	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINENSEC) {
		long ltemp = -time_phase >> (SHIFT_SCALE - 10);
		time_phase += ltemp << (SHIFT_SCALE - 10);
		delta_nsec -= ltemp;
	}
	else if (time_phase >= FINENSEC) {
		long ltemp = time_phase >> (SHIFT_SCALE - 10);
		time_phase -= ltemp << (SHIFT_SCALE - 10);
		delta_nsec += ltemp;
	}
	xtime.tv_nsec += delta_nsec;
	time_interpolator_update(delta_nsec);

	/* Changes by adjtime() do not take effect till next tick. */
	if (time_next_adjust != 0) {
		time_adjust = time_next_adjust;
		time_next_adjust = 0;
	}
}

/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks)
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	if (xtime.tv_nsec >= 1000000000) {
		xtime.tv_nsec -= 1000000000;
		xtime.tv_sec++;
		second_overflow();
	}
}
static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	unsigned long psecs;

	psecs = (p->utime += user);
	psecs += (p->stime += system);
	if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_cur) {
		/* Send SIGXCPU every second.. */
		if (!(psecs % HZ))
			send_sig(SIGXCPU, p, 1);
		/* and SIGKILL when we go over max.. */
		if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}

static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		it_virt -= ticks;
		if (!it_virt) {
			it_virt = p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		}
		p->it_virt_value = it_virt;
	}
}

static inline void do_it_prof(struct task_struct *p)
{
	unsigned long it_prof = p->it_prof_value;

	if (it_prof) {
		if (--it_prof == 0) {
			it_prof = p->it_prof_incr;
			send_sig(SIGPROF, p, 1);
		}
		p->it_prof_value = it_prof;
	}
}

static void update_one_process(struct task_struct *p, unsigned long user,
			unsigned long system, int cpu)
{
	do_process_times(p, user, system);
	do_it_virt(p, user);
	do_it_prof(p);
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id(), system = user_tick ^ 1;

	update_one_process(p, user_tick, system, cpu);
	run_local_timers();
	scheduler_tick(user_tick, system);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
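/*
 * A worked example, using the fixed-point constants from
 * <linux/sched.h> (FIXED_1 = 1<<11 = 2048, EXP_1 = 1884,
 * LOAD_FREQ = 5*HZ): every 5 seconds CALC_LOAD computes
 *
 *	avenrun[0] = (avenrun[0]*1884 + active_tasks*(2048 - 1884)) >> 11;
 *
 * With one task runnable forever (active_tasks == 2048) and avenrun[0]
 * starting at 0, successive samples give 164, 314, 452, ... converging
 * on 2048, which /proc/loadavg reports as 0.08, 0.15, 0.22, ... -> 1.00.
 */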
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = &__get_cpu_var(tvec_bases);

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
	unsigned long ticks;

	ticks = jiffies - wall_jiffies;
	if (ticks) {
		wall_jiffies += ticks;
		update_wall_time(ticks);
	}
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
#ifndef CONFIG_SMP
	/* SMP process accounting uses the local APIC timer */

	update_process_times(user_mode(regs));
#endif
	update_times();
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
		oldalarm++;
	return oldalarm;
}

#endif
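/*
 * Rounding example for the test above: with 0.4s left (oldalarm == 0,
 * tv_usec == 400000) the first clause fires and 1 is returned instead
 * of 0; with 2.6s left (tv_usec >= 500000) 3 is returned instead of 2;
 * with 2.4s left neither clause fires and 2 is returned.
 */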
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}

/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
	int pid;
	struct task_struct *me = current;
	struct task_struct *parent;

	parent = me->group_leader->real_parent;
	for (;;) {
		pid = parent->tgid;
#ifdef CONFIG_SMP
{
		struct task_struct *old = parent;

		/*
		 * Make sure we read the pid before re-reading the
		 * parent pointer:
		 */
		rmb();
		parent = me->group_leader->real_parent;
		if (old != parent)
			continue;
}
#endif
		break;
	}
	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((task_t *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	init_timer(&timer);
	timer.expires = expire;
	timer.data = (unsigned long) current;
	timer.function = process_timeout;

	add_timer(&timer);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}

EXPORT_SYMBOL(schedule_timeout);
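/*
 * Usage sketch (illustrative only): the task state must be set before
 * calling schedule_timeout(), otherwise it returns immediately:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(5 * HZ);	// sleep up to 5 seconds
 *	if (remaining)
 *		;	// woken early (signal or explicit wake_up_process())
 *	else
 *		;	// the full 5 seconds elapsed
 *
 * msleep() and msleep_interruptible() at the bottom of this file are
 * thin wrappers around exactly this pattern.
 */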
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}

static long __sched nanosleep_restart(struct restart_block *restart)
{
	unsigned long expire = restart->arg0, now = jiffies;
	struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
	long ret;

	/* Did it expire while we handled signals? */
	if (!time_after(expire, now))
		return 0;

	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire - now);

	ret = 0;
	if (expire) {
		struct timespec t;
		jiffies_to_timespec(expire, &t);

		ret = -ERESTART_RESTARTBLOCK;
		if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
			ret = -EFAULT;
		/* The 'restart' block is already filled in */
	}
	return ret;
}

asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec t;
	unsigned long expire;
	long ret;

	if (copy_from_user(&t, rqtp, sizeof(t)))
		return -EFAULT;

	if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
		return -EINVAL;

	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire);

	ret = 0;
	if (expire) {
		struct restart_block *restart;
		jiffies_to_timespec(expire, &t);
		if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
			return -EFAULT;

		restart = &current_thread_info()->restart_block;
		restart->fn = nanosleep_restart;
		restart->arg0 = jiffies + expire;
		restart->arg1 = (unsigned long) rmtp;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
/**
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying.  The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

 out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
static void __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;

	base = &per_cpu(tvec_bases, cpu);
	spin_lock_init(&base->lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
}

#ifdef CONFIG_HOTPLUG_CPU
static int migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		/* We're locking backwards from __mod_timer order here,
		   beware deadlock. */
		if (!spin_trylock(&timer->lock))
			return 0;
		list_del(&timer->entry);
		internal_add_timer(new_base, timer);
		timer->base = new_base;
		spin_unlock(&timer->lock);
	}
	return 1;
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(tvec_bases, cpu);
	new_base = &get_cpu_var(tvec_bases);

	local_irq_disable();
again:
	/* Prevent deadlocks via ordering by old_base < new_base. */
	if (old_base < new_base) {
		spin_lock(&new_base->lock);
		spin_lock(&old_base->lock);
	} else {
		spin_lock(&old_base->lock);
		spin_lock(&new_base->lock);
	}

	if (old_base->running_timer)
		BUG();
	for (i = 0; i < TVR_SIZE; i++)
		if (!migrate_timer_list(new_base, old_base->tv1.vec + i))
			goto unlock_again;
	for (i = 0; i < TVN_SIZE; i++)
		if (!migrate_timer_list(new_base, old_base->tv2.vec + i)
		    || !migrate_timer_list(new_base, old_base->tv3.vec + i)
		    || !migrate_timer_list(new_base, old_base->tv4.vec + i)
		    || !migrate_timer_list(new_base, old_base->tv5.vec + i))
			goto unlock_again;
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
	return;

unlock_again:
	/* Avoid deadlock with __mod_timer, by backing off. */
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	cpu_relax();
	goto again;
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
		init_timers_cpu(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator;
static struct time_interpolator *time_interpolator_list;
static spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;

static inline unsigned long time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src)
	{
		case TIME_SOURCE_FUNCTION:
			x = time_interpolator->addr;
			return x();

		case TIME_SOURCE_MMIO64:
			return readq(time_interpolator->addr);

		case TIME_SOURCE_MMIO32:
			return readl(time_interpolator->addr);

		default: return get_cycles();
	}
}

static inline unsigned long time_interpolator_get_counter(void)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter)
	{
		unsigned long lcycle;
		unsigned long now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;
			/* Keep track of the last timer value returned. The use of cmpxchg here
			 * will cause contention in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	}
	else
		return time_interpolator_get_cycles(src);
}

void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter();
}

unsigned long time_interpolator_resolution(void)
{
	if (time_interpolator->frequency < NSEC_PER_SEC)
		return NSEC_PER_SEC / time_interpolator->frequency;
	else
		return 1;
}

#define GET_TI_NSECS(count,i) ((((count) - i->last_counter) * i->nsec_per_cyc) >> i->shift)
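/*
 * GET_TI_NSECS converts elapsed interpolator cycles to nanoseconds in
 * fixed point: register_time_interpolator() below sets
 * nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency, so
 *
 *	nsecs ~= (cycles * nsec_per_cyc) >> shift
 *	      == cycles * NSEC_PER_SEC / frequency
 *
 * For example (hypothetical numbers), a 10 MHz counter with shift == 10
 * gives nsec_per_cyc = (10^9 << 10) / 10^7 = 102400, and 25 elapsed
 * cycles yield (25 * 102400) >> 10 = 2500 ns, i.e. 2.5 us.
 */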
unsigned long time_interpolator_get_offset(void)
{
	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator);
}

static void time_interpolator_update(long delta_nsec)
{
	unsigned long counter = time_interpolator_get_counter();
	unsigned long offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);

	/* The interpolator compensates for late ticks by accumulating
	 * the late time in time_interpolator->offset. A tick earlier than
	 * expected will lead to a reset of the offset and a corresponding
	 * jump of the clock forward. Again this only works if the
	 * interpolator clock is running slightly slower than the regular clock
	 * and the tuning logic ensures that.
	 */

	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0)
	{
		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}

void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	ti->nsec_per_cyc = (NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}

void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs);

	while (timeout) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
	}
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs);

	while (timeout && !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
	}
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);