/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        unsigned long next_timer;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;
struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
/*
 * Note that all tvec_bases are 2 byte aligned and the lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB for
 * the new flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)
/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base &
                                     ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
        timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
                                            TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        timer->base = (struct tvec_base *)((unsigned long)(new_base) |
                                           tbase_get_deferrable(timer->base));
}
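
/*
 * Illustration (not part of the original source): because a tvec_base is
 * at least 2-byte aligned, bit 0 of a "struct tvec_base *" is always zero
 * and can carry the deferrable flag. For a hypothetical base at 0x1000:
 *
 *      timer->base = (struct tvec_base *)(0x1000 | TBASE_DEFERRABLE_FLAG);
 *                                              // stored as 0x1001
 *      tbase_get_deferrable(timer->base);      // -> 1
 *      tbase_get_base(timer->base);            // -> (struct tvec_base *)0x1000
 *
 * The real pointer must always be recovered via tbase_get_base() before
 * it is dereferenced.
 */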
static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up)    /* round down */
                j = j - rem;
        else                            /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely; */
                return original;
        return j;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
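
/*
 * Usage sketch (illustrative only, not from this file): a hypothetical
 * driver re-arming a once-per-second housekeeping timer, letting the
 * kernel coalesce the wakeup onto a whole-second boundary:
 *
 *      static void housekeeping_fn(unsigned long data)
 *      {
 *              // ... periodic work ...
 *              mod_timer(&housekeeping_timer, round_jiffies(jiffies + HZ));
 *      }
 *
 * The firing time is only approximate; pass the raw jiffies value when
 * precision matters more than power.
 */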
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);
/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);
/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
        timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
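
/*
 * Usage sketch (illustrative only, not from this file): a hypothetical
 * driver that can tolerate its 100 ms poll firing up to one tick late,
 * letting the subsystem batch it with neighbouring timers:
 *
 *      setup_timer(&poll_timer, poll_fn, 0);
 *      set_timer_slack(&poll_timer, 1);
 *      mod_timer(&poll_timer, jiffies + msecs_to_jiffies(100));
 *
 * Leaving the slack at its default of -1 applies the automatic 0.4%
 * heuristic used by apply_slack() below instead.
 */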
static inline void set_running_timer(struct tvec_base *base,
                                        struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
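
/*
 * Worked example (not part of the original source): with the non-small
 * configuration, TVR_BITS = 8 and TVN_BITS = 6, so tv1 covers deltas of
 * 0..255 jiffies, tv2 up to 2^14 - 1, tv3 up to 2^20 - 1, tv4 up to
 * 2^26 - 1, and tv5 everything beyond. A timer due in 1000 jiffies
 * (idx = 1000) therefore lands in tv2, in slot (expires >> 8) & 0x3f.
 */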
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}
static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (likely(!timer->start_site))
                return;
        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.next == NULL &&
                    timer->entry.prev == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        WARN_ON_ONCE(1);
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name           = "timer_list",
        .fixup_init     = timer_fixup_init,
        .fixup_activate = timer_fixup_activate,
        .fixup_free     = timer_fixup_free,
};
static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
static void __init_timer(struct timer_list *timer,
                         const char *name,
                         struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
                             const char *name,
                             struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        __init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}
static void __init_timer(struct timer_list *timer,
                         const char *name,
                         struct lock_class_key *key)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
        timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
                    const char *name,
                    struct lock_class_key *key)
{
        debug_init(timer);
        __init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);
void init_timer_deferrable_key(struct timer_list *timer,
                               const char *name,
                               struct lock_class_key *key)
{
        init_timer_key(timer, name, key);
        timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);
static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        debug_deactivate(timer);

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
                                bool pending_only, int pinned)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0, cpu;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                if (timer->expires == base->next_timer &&
                    !tbase_get_deferrable(timer->base))
                        base->next_timer = base->timer_jiffies;
                ret = 1;
        } else {
                if (pending_only)
                        goto out_unlock;
        }

        debug_activate(timer, expires);

        cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
                int preferred_cpu = get_nohz_load_balancer();

                if (preferred_cpu >= 0)
                        cpu = preferred_cpu;
        }
#endif
        new_base = per_cpu(tvec_bases, cpu);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not finished yet. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        if (time_before(timer->expires, base->next_timer) &&
            !tbase_get_deferrable(timer->base))
                base->next_timer = timer->expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
        unsigned long expires_limit, mask;
        int bit;

        expires_limit = expires + timer->slack;

        if (timer->slack < 0) /* auto slack: use 0.4% */
                expires_limit = expires + (expires - jiffies)/256;

        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        bit = find_last_bit(&mask, BITS_PER_LONG);

        mask = (1 << bit) - 1;

        expires_limit = expires_limit & ~(mask);

        return expires_limit;
}
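
/*
 * Worked example (not part of the original source): take expires =
 * 0x10010 with slack = 0x40.  Then expires_limit = 0x10050, mask =
 * 0x10010 ^ 0x10050 = 0x40, the highest differing bit is bit 6, so the
 * result is 0x10050 & ~0x3f = 0x10040: everything below bit 6 is
 * rounded away, giving the wheel a coarser, cheaper slot to hit.  With
 * the auto heuristic (slack = -1) and jiffies = 0x10000, the extra
 * 0x10/256 truncates to 0, mask is 0, and expires is returned as-is.
 */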
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

        expires = apply_slack(timer, expires);

        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
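
/*
 * Usage sketch (illustrative only, not from this file): mod_timer() is
 * the usual way for a handler to re-arm its own timer, and is safe
 * whether or not the timer is currently pending.  Names below are
 * hypothetical:
 *
 *      static void my_poll_fn(unsigned long data)
 *      {
 *              // ... do work ...
 *              mod_timer(&my_poll_timer, jiffies + HZ / 10);
 *      }
 */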
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and not allow the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_activate(timer, timer->expires);
        if (time_before(timer->expires, base->next_timer) &&
            !tbase_get_deferrable(timer->base))
                base->next_timer = timer->expires;
        internal_add_timer(base, timer);
        /*
         * Check whether the other CPU is idle and needs to be
         * triggered to reevaluate the timer wheel when nohz is
         * active. We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to idle can not evaluate
         * the timer wheel.
         */
        wake_up_idle_cpu(cpu);
        spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        if (timer->expires == base->next_timer &&
                            !tbase_get_deferrable(timer->base))
                                base->next_timer = base->timer_jiffies;
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        timer_stats_timer_clear_start_info(timer);
        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                if (timer->expires == base->next_timer &&
                    !tbase_get_deferrable(timer->base))
                        base->next_timer = base->timer_jiffies;
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif

        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif
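
/*
 * Usage sketch (illustrative only, not from this file): the classic
 * teardown order for a hypothetical driver is to stop new arming first,
 * then synchronously kill the timer before freeing what it touches:
 *
 *      my_dev->shutting_down = true;   // handler checks this, won't re-arm
 *      del_timer_sync(&my_dev->timer);
 *      kfree(my_dev);
 *
 * Calling del_timer_sync() while holding a lock the handler takes would
 * deadlock, per the synchronization rules above.
 */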
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                internal_add_timer(base, timer);
        }

        return index;
}
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
        int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the timer from inside the
         * function that is called from it. This we need to take into
         * account for lockdep too. To avoid bogus "held lock freed"
         * warnings as well as problems when looking into
         * timer->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map = timer->lockdep_map;
#endif
        /*
         * Couple the lock chain with the lock chain at
         * del_timer_sync() by acquiring the lock_map around the fn()
         * call here and in del_timer_sync().
         */
        lock_map_acquire(&lockdep_map);

        trace_timer_expire_entry(timer);
        fn(data);
        trace_timer_expire_exit(timer);

        lock_map_release(&lockdep_map);

        if (preempt_count != preempt_count()) {
                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                          fn, preempt_count, preempt_count());
                /*
                 * Restore the preempt count. That gives us a decent
                 * chance to survive and extract information. If the
                 * callback kept a lock held, bad luck, but not worse
                 * than the BUG() we had.
                 */
                preempt_count() = preempt_count;
        }
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
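
/*
 * Illustration (not part of the original source): INDEX(N) selects the
 * slot in the (N+2)-th vector that corresponds to the current
 * timer_jiffies.  With TVR_BITS = 8 and TVN_BITS = 6, INDEX(0) is
 * (timer_jiffies >> 8) & 0x3f: the tv2 slot whose timers must be
 * cascaded down into tv1 each time the low 8 bits of timer_jiffies
 * wrap to zero.
 */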
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        timer_stats_account_timer(timer);

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);

                        spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, data);
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                if (tbase_get_deferrable(nte->base))
                                        continue;

                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets ?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq.
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __get_cpu_var(tvec_bases);
        unsigned long expires;

        spin_lock(&base->lock);
        if (time_before_eq(base->next_timer, base->timer_jiffies))
                base->next_timer = __next_timer_interrupt(base);
        expires = base->next_timer;
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
        printk_tick();
        perf_event_do_pending();
        scheduler_tick();
        run_posix_cpu_timers(p);
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = __get_cpu_var(tvec_bases);

        hrtimer_run_pending();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}
/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
}
/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_wall_time();
        calc_global_load();
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
        return alarm_setitimer(seconds);
}

#endif
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
        return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
        int pid;

        rcu_read_lock();
        pid = task_tgid_vnr(current->real_parent);
        rcu_read_unlock();

        return pid;
}

SYSCALL_DEFINE0(getuid)
{
        /* Only we change this so SMP safe */
        return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
        /* Only we change this so SMP safe */
        return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
        /* Only we change this so SMP safe */
        return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
        /* Only we change this so SMP safe */
        return current_egid();
}

#endif
static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful for the caller's
                 * comfort. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of the kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
        schedule();
        del_singleshot_timer_sync(&timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
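
/*
 * Usage sketch (illustrative only, not from this file): the task state
 * must be set before calling, otherwise schedule_timeout() returns at
 * once:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(msecs_to_jiffies(500));
 *      if (remaining)
 *              ; // woken early, e.g. by a signal
 *
 * The helpers below fold the set_current_state() call in.
 */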
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
        __set_current_state(TASK_KILLABLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
        return task_pid_vnr(current);
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        struct timespec tp;

        memset(info, 0, sizeof(struct sysinfo));

        ktime_get_ts(&tp);
        monotonic_to_bootbased(&tp);
        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

        info->procs = nr_threads;

        si_meminfo(info);
        si_swapinfo(info);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels.  If not,
         * well, in that case 2.2.x was broken anyways...
         *
         *  -Erik Andersen <andersee@debian.org>
         */

        mem_total = info->totalram + info->totalswap;
        if (mem_total < info->totalram || mem_total < info->totalswap)
                goto out;
        bitcount = 0;
        mem_unit = info->mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * info->mem_unit and set it to 1.  This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        info->mem_unit = 1;
        info->totalram <<= bitcount;
        info->freeram <<= bitcount;
        info->sharedram <<= bitcount;
        info->bufferram <<= bitcount;
        info->totalswap <<= bitcount;
        info->freeswap <<= bitcount;
        info->totalhigh <<= bitcount;
        info->freehigh <<= bitcount;

out:
        return 0;
}
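
/*
 * Worked example (not part of the original source): with mem_unit =
 * 4096 the loop shifts mem_total left 12 times (bitcount = 12) while
 * watching for overflow. If no shift overflows, every memory field is
 * scaled by 4096 (<< 12) and mem_unit is set to 1, so userspace sees
 * plain byte counts exactly as a 2.2.x kernel would have reported them.
 */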
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
        struct sysinfo val;

        do_sysinfo(&val);

        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}
static int __cpuinit init_timers_cpu(int cpu)
{
        int j;
        struct tvec_base *base;
        static char __cpuinitdata tvec_base_done[NR_CPUS];

        if (!tvec_base_done[cpu]) {
                static char boot_done;

                if (boot_done) {
                        /*
                         * The APs use this path later in boot
                         */
                        base = kmalloc_node(sizeof(*base),
                                                GFP_KERNEL | __GFP_ZERO,
                                                cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;

                        /* Make sure that tvec_base is 2 byte aligned */
                        if (tbase_get_deferrable(base)) {
                                WARN_ON(1);
                                kfree(base);
                                return -ENOMEM;
                        }
                        per_cpu(tvec_bases, cpu) = base;
                } else {
                        /*
                         * This is for the boot CPU - we use compile-time
                         * static initialisation because per-cpu memory isn't
                         * ready yet and because the memory allocators are not
                         * initialised either.
                         */
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }

        spin_lock_init(&base->lock);

        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
        return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_first_entry(head, struct timer_list, entry);
                detach_timer(timer, 0);
                timer_set_base(timer, new_base);
                if (time_before(timer->expires, new_base->next_timer) &&
                    !tbase_get_deferrable(timer->base))
                        new_base->next_timer = timer->expires;
                internal_add_timer(new_base, timer);
        }
}

static void __cpuinit migrate_timers(int cpu)
{
        struct tvec_base *old_base;
        struct tvec_base *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
         */
        spin_lock_irq(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (init_timers_cpu(cpu) < 0)
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
        .notifier_call  = timer_cpu_notify,
};


void __init init_timers(void)
{
        int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());

        init_timer_stats();

        BUG_ON(err == NOTIFY_BAD);
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
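
/*
 * Usage note (illustrative only, not from this file): msleep() always
 * sleeps for at least the requested time, so it is the safe default:
 *
 *      msleep(20);     // uninterruptible, >= 20 ms
 *
 * Use msleep_interruptible() below only when a pending signal should
 * cut the sleep short; it returns the time remaining in milliseconds.
 */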
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);