/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>
52 #include "tick-internal.h"
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/timer.h>
__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
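/*
 * Worked example of the wheel geometry (illustrative, assuming the
 * default CONFIG_BASE_SMALL=0 values TVR_BITS=8, TVN_BITS=6): tv1 has
 * 256 one-jiffy buckets, and each of tv2..tv5 adds a 64-way level, so
 * the wheel spans 8 + 4*6 = 32 bits of jiffies.  At HZ=250, tv1 covers
 * ~1s, tv2 ~65s, tv3 ~70min, tv4 ~3days and tv5 everything up to
 * MAX_TVAL (~199 days).
 */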
struct tvec {
	struct hlist_head vec[TVN_SIZE];
};

struct tvec_root {
	struct hlist_head vec[TVR_SIZE];
};
struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	unsigned long all_timers;
	int cpu;
	bool migration_enabled;
	bool nohz_active;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(tvec_bases.migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(tvec_bases.migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(tvec_bases.nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}
}
int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}
static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned)
{
	if (pinned || !base->migration_enabled)
		return this_cpu_ptr(&tvec_bases);
	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
}
#else
static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned)
{
	return this_cpu_ptr(&tvec_bases);
}
#endif
static unsigned long round_jiffies_common(unsigned long j, int cpu,
					  bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up)	/* round down */
		j = j - rem;
	else				/* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
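/*
 * Usage sketch for the rounding helpers (illustrative only; my_dev,
 * my_dev_poll and poll_timer are placeholder names): a driver polling
 * hardware once a second can round its period so that many such timers
 * expire together on second boundaries and the CPU wakes up less:
 *
 *	static void poll_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		my_dev_poll(dev);
 *		mod_timer(&dev->poll_timer, round_jiffies(jiffies + HZ));
 *	}
 */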
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
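/*
 * Usage sketch (illustrative; wd_timer and wd_fn are placeholder
 * names): a timeout that may fire up to ~100ms late can grant the
 * subsystem that much rounding freedom explicitly:
 *
 *	setup_timer(&wd_timer, wd_fn, 0);
 *	set_timer_slack(&wd_timer, msecs_to_jiffies(100));
 *	mod_timer(&wd_timer, jiffies + msecs_to_jiffies(500));
 */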
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct hlist_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}

	hlist_add_head(&timer->entry, vec);
}
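/*
 * Worked example of the bucket selection above (TVR_BITS=8,
 * TVN_BITS=6): with base->timer_jiffies = 1000 and timer->expires =
 * 1300, idx = 300, which is >= TVR_SIZE (256) but < 1 << 14, so the
 * timer lands in tv2 at slot (1300 >> 8) & 63 = 5.  It is cascaded
 * back into tv1 once base->timer_jiffies reaches that 256-jiffy window.
 */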
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	/* Advance base->jiffies, if the base is empty */
	if (!base->all_timers++)
		base->timer_jiffies = jiffies;

	__internal_add_timer(base, timer);

	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		if (!base->active_timers++ ||
		    time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
	}

	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(), let's deal
	 * with that later.
	 */
	if (base->nohz_active) {
		if (!(timer->flags & TIMER_DEFERRABLE) ||
		    tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
	}
}
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	void *site;

	/*
	 * start_site can be concurrently reset by
	 * timer_stats_timer_clear_start_info()
	 */
	site = READ_ONCE(timer->start_site);
	if (likely(!site))
		return;

	timer_stats_update_stats(timer, timer->start_pid, site,
				 timer->function, timer->start_comm,
				 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.pprev == NULL &&
		    timer->entry.next == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.next == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};
static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}
static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}
static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
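/*
 * Typical initialization patterns (illustrative sketch; my_func and
 * my_cookie are placeholder names):
 *
 *	struct timer_list my_timer;
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_func;
 *	my_timer.data = (unsigned long)my_cookie;
 *
 * or, equivalently, the combined helper:
 *
 *	setup_timer(&my_timer, my_func, (unsigned long)my_cookie);
 *
 * Both paths end up in do_init_timer() via init_timer_key().
 */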
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}
static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!(timer->flags & TIMER_DEFERRABLE))
		base->active_timers--;
	base->all_timers--;
}
static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	/* If this was the last timer, advance base->jiffies */
	if (!--base->all_timers)
		base->timer_jiffies = jiffies;
	return 1;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked and removed from the list, the
 * TIMER_MIGRATING flag is set, FIXME.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		u32 tf = timer->flags;
		struct tvec_base *base;

		if (!(tf & TIMER_MIGRATING)) {
			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	new_base = get_target_base(base, pinned);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler yet has not finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
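/*
 * Worked example for the default slack of -1 (0.4% of the delay):
 * with jiffies = 0 and expires = 1000 (0x3e8), delta = 1000, so
 * expires_limit = 1003 (0x3eb).  expires ^ expires_limit = 0x3, whose
 * last set bit is bit 1, so mask = 0x1 and expires_limit rounds down
 * to 0x3ea (1002).  Timers with nearby deadlines thus tend to share
 * wheel buckets and expire together.
 */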
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
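/*
 * Usage sketch (illustrative; ka_timer is a placeholder name):
 * rearming a keepalive 30 seconds into the future, whether or not the
 * timer is currently pending:
 *
 *	mod_timer(&ka_timer, jiffies + 30 * HZ);
 */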
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
	struct tvec_base *base;
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
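/*
 * Usage sketch (illustrative; t and my_percpu_fn are placeholder
 * names): firing per-CPU housekeeping on a specific CPU:
 *
 *	setup_timer(&t->timer, my_percpu_fn, (unsigned long)t);
 *	t->timer.expires = jiffies + HZ;
 *	add_timer_on(&t->timer, cpu);
 */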
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
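/*
 * Teardown sketch (illustrative; dev is a placeholder name): a driver
 * whose handler may rearm the timer must first prevent rearming (the
 * handler checks the flag before calling mod_timer() again), then
 * synchronize before freeing:
 *
 *	dev->shutting_down = true;
 *	del_timer_sync(&dev->timer);
 *	kfree(dev);
 */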
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer;
	struct hlist_node *tmp;
	struct hlist_head tv_list;

	hlist_move_list(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct hlist_head work_list;
		struct hlist_head *head = &work_list;
		int index;

		if (!base->all_timers) {
			base->timer_jiffies = jiffies;
			break;
		}

		index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		hlist_move_list(base->tv1.vec + index, head);
		while (!hlist_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = hlist_entry(head->first, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = timer->flags & TIMER_IRQSAFE;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (nte->flags & TIMER_DEFERRABLE)
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			hlist_for_each_entry(nte, varp->vec + slot, entry) {
				if (nte->flags & TIMER_DEFERRABLE)
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}
/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		nextevt = base->next_timer;
		if (time_before_eq(nextevt, basej))
			expires = basem;
		else
			expires = basem + (nextevt - basej) * TICK_NSEC;
	}
	spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}
/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
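/*
 * Usage sketch (illustrative): the task state must be set first,
 * otherwise schedule_timeout() returns immediately:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(200));
 *
 * remaining is 0 if the full 200ms elapsed, else the jiffies that
 * were left when a signal woke the task early.
 */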
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/*
 * Like schedule_timeout_uninterruptible(), except this task will not contribute
 * to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}
static void migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu_ptr(&tvec_bases, cpu);
	new_base = get_cpu_ptr(&tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	old_base->active_timers = 0;
	old_base->all_timers = 0;

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_ptr(&tvec_bases);
}
static int timer_cpu_notify(struct notifier_block *self,
			    unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers((long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static inline void timer_register_cpu_notifier(void)
{
	cpu_notifier(timer_cpu_notify, 0);
}
#else
static inline void timer_register_cpu_notifier(void) { }
#endif /* CONFIG_HOTPLUG_CPU */
static void __init init_timer_cpu(int cpu)
{
	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);

	base->cpu = cpu;
	spin_lock_init(&base->lock);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
}
static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}
void __init init_timers(void)
{
	init_timer_cpus();
	init_timer_stats();
	timer_register_cpu_notifier();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
static void __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	u64 delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (u64)(max - min) * NSEC_PER_USEC;
	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}
/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
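/*
 * Informal guidance on choosing a sleep helper (see also
 * Documentation/timers/timers-howto.txt): msleep() is backed by the
 * jiffy wheel and can oversleep by a jiffy or more, so short waits
 * are better served by the hrtimer-backed usleep_range():
 *
 *	usleep_range(100, 200);		for waits of ~usecs to ~msecs
 *	msleep(50);			for waits of 10ms and up
 */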