/*
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *             serialize accesses to xtime/lost_ticks).
 *                             Copyright (C) 1998  Andrea Arcangeli
 * 1999-03-10  Improved NTP compatibility by Ulrich Windl
 * 2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                             Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *             Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are 2 byte aligned and the lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB for
 * the new flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)
/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base &
				     ~TBASE_DEFERRABLE_FLAG));
}
static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
					    TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
					   tbase_get_deferrable(timer->base));
}
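/*
 * Illustrative sketch (not part of the original source): the helpers
 * above implement classic pointer tagging. Every tvec_base is at
 * least 2-byte aligned, so bit 0 of a tvec_base pointer is always
 * zero and can carry the "deferrable" flag without growing
 * struct timer_list. Tagging, querying and untagging look like:
 *
 *	base = (void *)((unsigned long)base | 0x1);
 *	deferrable = (unsigned long)base & 0x1;
 *	base = (void *)((unsigned long)base & ~(unsigned long)0x1);
 *
 * The tagged value must go through tbase_get_base() before it is
 * dereferenced.
 */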
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);
/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);
/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
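/*
 * Usage sketch (illustrative; "my_timer" is a hypothetical, already
 * initialized timer): a driver that polls roughly once per second can
 * batch its wakeup with other rounded timers:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 *
 * For a relative delay that must never fire early, the _up variant is
 * the right choice:
 *
 *	mod_timer(&my_timer, jiffies + round_jiffies_up_relative(HZ));
 */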
static inline void set_running_timer(struct tvec_base *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
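/*
 * Worked example (illustrative): with !CONFIG_BASE_SMALL, TVR_BITS = 8
 * and TVN_BITS = 6, so the wheel resolves as follows:
 *
 *	idx < 2^8	-> tv1, slot = expires & 255
 *	idx < 2^14	-> tv2, slot = (expires >> 8) & 63
 *	idx < 2^20	-> tv3, slot = (expires >> 14) & 63
 *	idx < 2^26	-> tv4, slot = (expires >> 20) & 63
 *	otherwise	-> tv5, slot = (expires >> 26) & 63
 *
 * A timer due in 1000 ticks (2^8 <= 1000 < 2^14) therefore lands in
 * tv2, and is moved down into tv1 by cascade() as its expiry nears.
 */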
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}
static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};
static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}
static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);
void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);
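/*
 * Usage sketch (illustrative; "my_func" and "my_data" are
 * hypothetical): the usual open-coded initialization of a dynamic
 * timer:
 *
 *	struct timer_list my_timer;
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_func;	(void my_func(unsigned long))
 *	my_timer.data = my_data;
 *	my_timer.expires = jiffies + HZ;
 *	add_timer(&my_timer);
 *
 * setup_timer() collapses the first three statements into one, and
 * init_timer_deferrable() produces a timer that will not wake an
 * otherwise idle CPU.
 */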
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
						bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	} else {
		if (pending_only)
			goto out_unlock;
	}

	debug_activate(timer, expires);

	new_base = __get_cpu_var(tvec_bases);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			cpu = preferred_cpu;
	}
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
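/*
 * Usage sketch (illustrative; "wd_timer" is a hypothetical watchdog
 * timer): re-arming for two seconds from now works whether or not the
 * timer is currently pending:
 *
 *	mod_timer(&wd_timer, jiffies + 2 * HZ);
 */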
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and not allow the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
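/*
 * Usage sketch (illustrative; "my_timer" is hypothetical): arming a
 * timer on CPU 1; the timer must not already be pending:
 *
 *	my_timer.expires = jiffies + HZ;
 *	add_timer_on(&my_timer, 1);
 */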
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			if (timer->expires == base->next_timer &&
			    !tbase_get_deferrable(timer->base))
				base->next_timer = base->timer_jiffies;
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	timer_stats_timer_clear_start_info(timer);
	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif

	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
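/*
 * Usage sketch (illustrative): tearing down a self-rearming timer at
 * module exit. The handler is first prevented from re-arming (here a
 * hypothetical "shutting_down" flag that the handler checks), then
 * the synchronous delete waits out any running handler:
 *
 *	shutting_down = 1;
 *	smp_mb();
 *	del_timer_sync(&my_timer);
 */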
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);

			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
				/*
				 * It is permissible to free the timer from
				 * inside the function that is called from
				 * it, and we need to take that into account
				 * for lockdep too. To avoid bogus "held lock
				 * freed" warnings as well as problems when
				 * looking into timer->lockdep_map, make a
				 * copy and use that here.
				 */
				struct lockdep_map lockdep_map =
					timer->lockdep_map;
#endif
				/*
				 * Couple the lock chain with the lock chain at
				 * del_timer_sync() by acquiring the lock_map
				 * around the fn() call here and in
				 * del_timer_sync().
				 */
				lock_map_acquire(&lockdep_map);

				trace_timer_expire_entry(timer);
				fn(data);
				trace_timer_expire_exit(timer);

				lock_map_release(&lockdep_map);

				if (preempt_count != preempt_count()) {
					printk(KERN_ERR "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Are we still searching for the first timer, or
			 * are we looking up the cascade buckets?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	if (time_before_eq(base->next_timer, base->timer_jiffies))
		base->next_timer = __next_timer_interrupt(base);
	expires = base->next_timer;
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
	printk_tick();
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);

	perf_event_do_pending();

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}
/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}
/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load();
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}
/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}
SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return current_egid();
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
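/*
 * Usage sketch (illustrative; "remaining" is hypothetical): the
 * canonical pattern sets the task state immediately before the call:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * A nonzero return value means the sleep was cut short, e.g. by a
 * signal. Without setting the state first, the task stays
 * TASK_RUNNING and the call returns almost immediately.
 */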
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
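/*
 * Worked example (illustrative): with info->mem_unit == 4096 (one
 * 4KiB page), the loop above shifts mem_total left twelve times while
 * counting bitcount = 12. If none of those shifts overflow, every
 * memory counter is then scaled by 2^12 and mem_unit is reported as
 * one byte, matching the old 2.2.x binary layout.
 */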
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
init_timers_cpu(int cpu
)
1518 struct tvec_base
*base
;
1519 static char __cpuinitdata tvec_base_done
[NR_CPUS
];
1521 if (!tvec_base_done
[cpu
]) {
1522 static char boot_done
;
1526 * The APs use this path later in boot
1528 base
= kmalloc_node(sizeof(*base
),
1529 GFP_KERNEL
| __GFP_ZERO
,
1534 /* Make sure that tvec_base is 2 byte aligned */
1535 if (tbase_get_deferrable(base
)) {
1540 per_cpu(tvec_bases
, cpu
) = base
;
1543 * This is for the boot CPU - we use compile-time
1544 * static initialisation because per-cpu memory isn't
1545 * ready yet and because the memory allocators are not
1546 * initialised either.
1549 base
= &boot_tvec_bases
;
1551 tvec_base_done
[cpu
] = 1;
1553 base
= per_cpu(tvec_bases
, cpu
);
1556 spin_lock_init(&base
->lock
);
1558 for (j
= 0; j
< TVN_SIZE
; j
++) {
1559 INIT_LIST_HEAD(base
->tv5
.vec
+ j
);
1560 INIT_LIST_HEAD(base
->tv4
.vec
+ j
);
1561 INIT_LIST_HEAD(base
->tv3
.vec
+ j
);
1562 INIT_LIST_HEAD(base
->tv2
.vec
+ j
);
1564 for (j
= 0; j
< TVR_SIZE
; j
++)
1565 INIT_LIST_HEAD(base
->tv1
.vec
+ j
);
1567 base
->timer_jiffies
= jiffies
;
1568 base
->next_timer
= base
->timer_jiffies
;
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		if (time_before(timer->expires, new_base->next_timer) &&
		    !tbase_get_deferrable(timer->base))
			new_base->next_timer = timer->expires;
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};
void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
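/*
 * Usage sketch (illustrative): millisecond-scale hardware settle
 * delays belong here rather than in a busy-wait loop:
 *
 *	msleep(50);
 *
 *	if (msleep_interruptible(50))
 *		return -ERESTARTSYS;
 *
 * where a nonzero return from msleep_interruptible() reports how many
 * milliseconds remained when a signal ended the sleep early.
 */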