/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

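/*
 * Capacity, for reference: with the default CONFIG_BASE_SMALL=0 values
 * (TVR_BITS = 8, TVN_BITS = 6), tv1 spans the next 256 jiffies and each
 * further level multiplies the covered range by 64, so TVR_BITS +
 * 4*TVN_BITS = 32 and MAX_TVAL = 2^32 - 1 jiffies. Longer timeouts
 * (possible on 64-bit) are clamped to MAX_TVAL in internal_add_timer().
 */
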
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = TBASE_MAKE_DEFERRED(timer->base);
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as the cutoff for this rounding, as an extreme upper bound.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

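/*
 * Usage sketch (my_timer, my_work_fn and do_housekeeping are placeholder
 * names, not part of this file): a handler that re-arms itself roughly
 * once a second can coalesce its wakeups with other second-aligned timers:
 *
 *	static void my_work_fn(unsigned long data)
 *	{
 *		do_housekeeping();
 *		mod_timer(&my_timer, jiffies + round_jiffies_relative(HZ));
 *	}
 */
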
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);

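/*
 * Usage sketch (poll_timer and poll_fn are placeholder names): a 100ms
 * polling timer that can tolerate 10ms of extra delay, letting the wheel
 * batch it with neighbouring expiries:
 *
 *	setup_timer(&poll_timer, poll_fn, 0);
 *	set_timer_slack(&poll_timer, msecs_to_jiffies(10));
 *	mod_timer(&poll_timer, jiffies + msecs_to_jiffies(100));
 */
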
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

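/*
 * Worked example, assuming base->timer_jiffies == 0: a timer with
 * expires == 100 has idx == 100 < TVR_SIZE (256) and is filed in
 * tv1.vec[100]; one with expires == 1000 has idx in [256, 1 << 14)
 * and is filed in tv2.vec[(1000 >> 8) & 63] == tv2.vec[3], from where
 * it is cascaded back into tv1 as timer_jiffies catches up to it.
 */
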
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.debug_hint	= timer_debug_hint,
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
					 const char *name,
					 struct lock_class_key *key,
					 void (*function)(unsigned long),
					 unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer_on_stack_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);

void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
						bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	} else {
		if (pending_only)
			goto out_unlock;
	}

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	/* use 1UL so the mask stays correct when bit >= 32 on 64-bit */
	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}

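/*
 * Worked example for the slack < 0 branch: with expires == 0x40000 and
 * delta == 1000, expires_limit becomes 0x40000 + 1000/256 == 0x40003.
 * The differing bits are mask == 0x3, whose highest set bit is bit 1,
 * so the final mask is 0x1 and 0x40002 is returned: zeroing the low
 * bits lets nearby timers land in the same bucket and expire together.
 */
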
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

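/*
 * Usage sketch (my_wd_timer is a placeholder name): a watchdog kicked
 * from a fast path just pushes its deadline out; when the slack-adjusted
 * expiry is unchanged, the pending-timer check above returns without
 * taking the base lock:
 *
 *	void my_watchdog_kick(void)
 *	{
 *		mod_timer(&my_wd_timer, jiffies + 2 * HZ);
 *	}
 */
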
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and not allow the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

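/*
 * Usage sketch (my_timer, my_fn and my_cookie are placeholder names):
 * one-shot arming of a freshly initialized timer:
 *
 *	setup_timer(&my_timer, my_fn, (unsigned long)my_cookie);
 *	my_timer.expires = jiffies + HZ;
 *	add_timer(&my_timer);
 *
 * For an already initialized timer, mod_timer() is the usual way to
 * (re)arm it in a single call.
 */
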
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			if (timer->expires == base->next_timer &&
			    !tbase_get_deferrable(timer->base))
				base->next_timer = base->timer_jiffies;
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	timer_stats_timer_clear_start_info(timer);
	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * Note: You must not hold locks that are held in interrupt context
 *   while calling this function. Even if the lock has nothing to do
 *   with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq());
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

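/*
 * Typical teardown sketch (my_dev is a placeholder): first prevent the
 * handler from re-arming, then cancel synchronously, and only then free:
 *
 *	my_dev->shutting_down = true;
 *	del_timer_sync(&my_dev->timer);
 *	kfree(my_dev);
 *
 * The handler must test my_dev->shutting_down before calling mod_timer()
 * again, per the synchronization rules above.
 */
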
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = timer->lockdep_map;
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (preempt_count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, preempt_count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count() = preempt_count;
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_timer(timer, 1);

			spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock_irq(&base->lock);
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

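/*
 * Cascade cadence, for reference: index wraps to 0 every TVR_SIZE (256)
 * jiffies, at which point one tv2 bucket is re-filed into tv1; one tv3
 * bucket is touched every 256 * 64 jiffies, tv4 every 256 * 64^2, and
 * tv5 every 256 * 64^3 (default TVR_BITS/TVN_BITS values assumed).
 */
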
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors in to account and make sure, that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return now + NEXT_TIMER_MAX_DELTA;
	spin_lock(&base->lock);
	if (time_before_eq(base->next_timer, base->timer_jiffies))
		base->next_timer = __next_timer_interrupt(base);
	expires = base->next_timer;
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
	printk_tick();
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return current_egid();
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

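/*
 * Canonical usage: sleep for up to a second, waking early if a signal
 * arrives (remaining holds the unslept balance in jiffies):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * which is exactly what schedule_timeout_interruptible() below wraps up.
 */
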
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
						GFP_KERNEL | __GFP_ZERO,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		if (time_before(timer->expires, new_base->next_timer) &&
		    !tbase_get_deferrable(timer->base))
			new_base->next_timer = timer->expires;
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch(action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};

void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);

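/*
 * Usage sketch: a device that needs roughly 100us to settle, where exact
 * timing is not critical, can give the scheduler a window instead of
 * demanding a precise hrtimer expiry:
 *
 *	usleep_range(100, 200);
 */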