/*
 *  kernel/sched/bfs.c, was kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues. Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 *  now         Brainfuck deadline scheduling policy by Con Kolivas deletes
 *              a whole lot of those previous things.
 */
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/log2.h>
#include <linux/bootmem.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/zentune.h>

#include <asm/unistd.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#include "../workqueue_sched.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
#define rt_task(p)		rt_prio((p)->prio)
#define rt_queue(rq)		rt_prio((rq)->rq_prio)
#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
#define is_rt_policy(policy)	((policy) == SCHED_FIFO || \
					(policy) == SCHED_RR)
#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
#define idleprio_task(p)	unlikely((p)->policy == SCHED_IDLEPRIO)
#define iso_task(p)		unlikely((p)->policy == SCHED_ISO)
#define iso_queue(rq)		unlikely((rq)->rq_policy == SCHED_ISO)
#define rq_running_iso(rq)	((rq)->rq_prio == ISO_PRIO)

#define ISO_PERIOD		((5 * HZ * grq.noc) + 1)
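/*
 * Illustrative arithmetic (added for clarity, not in the original source):
 * ISO_PERIOD is the rolling window, in jiffies, over which SCHED_ISO cpu
 * usage is averaged. With HZ=250 and 4 online cpus (grq.noc == 4) it works
 * out to 5 * 250 * 4 + 1 = 5001 jiffies, i.e. roughly five seconds per cpu.
 */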
/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p) - MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
#define SCHED_PRIO(p)		((p) + MAX_RT_PRIO)
#define STOP_PRIO		(MAX_RT_PRIO - 1)
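/*
 * Worked example (added for clarity, not in the original source): with the
 * usual MAX_RT_PRIO of 100 and MAX_PRIO of 140, nice -20 maps to static
 * priority 100, nice 0 to 120 and nice 19 to 139, while USER_PRIO() shifts
 * those back down to the 0..39 range (so MAX_USER_PRIO is 40).
 */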
/*
 * Some helpers for converting to/from various scales. Use shifts to get
 * approximate multiples of ten for less overhead.
 */
#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
#define JIFFY_NS		(1000000000 / HZ)
#define HALF_JIFFY_NS		(1000000000 / HZ / 2)
#define HALF_JIFFY_US		(1000000 / HZ / 2)
#define MS_TO_NS(TIME)		((TIME) << 20)
#define MS_TO_US(TIME)		((TIME) << 10)
#define NS_TO_MS(TIME)		((TIME) >> 20)
#define NS_TO_US(TIME)		((TIME) >> 10)
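/*
 * Illustrative arithmetic (added, not in the original): the shifts trade a
 * little accuracy for speed, e.g. MS_TO_NS(1) is 1 << 20 = 1048576 rather
 * than exactly 1000000, and NS_TO_US(2048) is 2048 >> 10 = 2 rather than
 * 2.048, which is close enough for the scheduler's bookkeeping.
 */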
#define RESCHED_US	(100) /* Reschedule if less than this many μs left */

void print_scheduler_version(void)
	printk(KERN_INFO "BFS CPU scheduler v0.420 by Con Kolivas.\n");

/*
 * This is the time all tasks within the same priority round robin.
 * Value is in ms and set to a minimum of 6ms. Scales with number of cpus.
 * Tunable via /proc interface.
 */
#if defined(CONFIG_ZEN_DEFAULT)
int rr_interval __read_mostly = 6;
#elif defined(CONFIG_ZEN_CUSTOM)
int rr_interval __read_mostly = rr_interval_custom;

/*
 * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
 * are allowed to run, averaged over five seconds, as real time tasks. This is
 * the total over all online cpus.
 */
#if defined(CONFIG_ZEN_DEFAULT)
int sched_iso_cpu __read_mostly = 70;
#elif defined(CONFIG_ZEN_CUSTOM)
int sched_iso_cpu __read_mostly = sched_iso_cpu_custom;

/*
 * The relative length of deadline for each priority (nice) level.
 */
static int prio_ratios[PRIO_RANGE] __read_mostly;

/*
 * The quota handed out to tasks of all priority levels when refilling their
 * time_slice.
 */
static inline int timeslice(void)
	return MS_TO_US(rr_interval);
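/*
 * Illustrative arithmetic (added, not in the original): with the default
 * rr_interval of 6ms, timeslice() hands out MS_TO_US(6) = 6 << 10 = 6144us
 * of quota per refill.
 */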
/*
 * The global runqueue data that all CPUs work off. Data is protected either
 * by the global grq lock, or the discrete lock that precedes the data in this
 * struct.
 */
	unsigned long nr_running;
	unsigned long nr_uninterruptible;
	unsigned long long nr_switches;
	struct list_head queue[PRIO_LIMIT];
	DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
	unsigned long qnr; /* queued not running */
	cpumask_t cpu_idle_map;
	int noc; /* num_online_cpus stored and updated when it changes */
	u64 niffies; /* Nanosecond jiffies */
	unsigned long last_jiffy; /* Last jiffy we updated niffies */
	raw_spinlock_t iso_lock;

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/* There can be only one */
static struct global_rq grq;
/*
 * This is the main, per-CPU runqueue data structure.
 * This data should only be modified by the local cpu.
 */
	unsigned char in_nohz_recently;

	struct task_struct *curr, *idle, *stop;
	struct mm_struct *prev_mm;

	/* Stored data about rq->curr to work outside grq lock */
	unsigned int rq_policy;
	bool rq_running; /* There is a task running */

	/* Accurate timekeeping data */
	unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
		iowait_pc, idle_pc;

	int cpu; /* cpu of this runqueue */

	bool scaling; /* This CPU is managed by a scaling CPU freq governor */
	struct task_struct *sticky_task;

	struct root_domain *rd;
	struct sched_domain *sd;
	int *cpu_locality; /* CPU relative cache distance */
#ifdef CONFIG_SCHED_SMT
	bool (*siblings_idle)(int cpu);
	/* See if all smt siblings are idle */
	cpumask_t smt_siblings;
#ifdef CONFIG_SCHED_MC
	bool (*cache_idle)(int cpu);
	/* See if all cache siblings are idle */
	cpumask_t cache_siblings;

	u64 last_niffy; /* Last time this RQ updated grq.niffies */
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
#ifdef CONFIG_PARAVIRT
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;

	u64 clock, old_clock, last_tick;

#ifdef CONFIG_SCHEDSTATS
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static DEFINE_MUTEX(sched_hotcpu_mutex);

/*
 * sched_domains_mutex serialises calls to init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

int __weak arch_sd_sibling_asym_packing(void)
	return 0*SD_ASYM_PACKING;
#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

static inline void update_rq_clock(struct rq *rq);

/*
 * Sanity check should sched_clock return bogus values. We make sure it does
 * not appear to go backwards, and use jiffies to determine the maximum and
 * minimum it could possibly have increased, and round down to the nearest
 * jiffy when it falls outside this.
 */
static inline void niffy_diff(s64 *niff_diff, int jiff_diff)
	unsigned long min_diff, max_diff;

	min_diff = JIFFIES_TO_NS(jiff_diff - 1);
	/* Round up to the nearest tick for maximum */
	max_diff = JIFFIES_TO_NS(jiff_diff + 1);

	if (unlikely(*niff_diff < min_diff || *niff_diff > max_diff))
		*niff_diff = min_diff;
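/*
 * Illustrative arithmetic (added, not in the original): with HZ=1000 a jiffy
 * is 1000000ns, so for a jiff_diff of 2 ticks the clamp window is
 * [JIFFIES_TO_NS(1), JIFFIES_TO_NS(3)] = [1000000ns, 3000000ns]; a bogus
 * niff_diff of, say, 50ms would be pulled back down to 1000000ns.
 */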
#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
#define this_rq()	(&__get_cpu_var(runqueues))
#define task_rq(p)	cpu_rq(task_cpu(p))
#define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
static inline int cpu_of(struct rq *rq)

/*
 * Niffies are a globally increasing nanosecond counter. Whenever a runqueue
 * clock is updated with the grq.lock held, it is an opportunity to update the
 * niffies value. Any CPU can update it by adding how much its clock has
 * increased since it last updated niffies, minus any added niffies by other
 * CPUs.
 */
static inline void update_clocks(struct rq *rq)
	ndiff = rq->clock - rq->old_clock;
	/* old_clock is only updated when we are updating niffies */
	rq->old_clock = rq->clock;
	ndiff -= grq.niffies - rq->last_niffy;
	jdiff = jiffies - grq.last_jiffy;
	niffy_diff(&ndiff, jdiff);
	grq.last_jiffy += jdiff;
	grq.niffies += ndiff;
	rq->last_niffy = grq.niffies;
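/*
 * Illustrative example (added, not in the original): if this runqueue's clock
 * advanced 3ms since old_clock but other CPUs already pushed grq.niffies
 * forward by 1ms since rq->last_niffy, only the remaining 2ms (sanity checked
 * by niffy_diff() against the elapsed jiffies) is added to grq.niffies, so
 * the global counter is not double counted.
 */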
#else /* CONFIG_SMP */
static struct rq *uprq;
#define cpu_rq(cpu)	(uprq)
#define this_rq()	(uprq)
#define task_rq(p)	(uprq)
#define cpu_curr(cpu)	((uprq)->curr)
static inline int cpu_of(struct rq *rq)

static inline void update_clocks(struct rq *rq)
	ndiff = rq->clock - rq->old_clock;
	rq->old_clock = rq->clock;
	jdiff = jiffies - grq.last_jiffy;
	niffy_diff(&ndiff, jdiff);
	grq.last_jiffy += jdiff;
	grq.niffies += ndiff;

#define raw_rq()	(&__raw_get_cpu_var(runqueues))
#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)

/*
 * All common locking functions performed on grq.lock. rq->clock is local to
 * the CPU accessing it so it can be modified just with interrupts disabled
 * when we're not updating niffies.
 * Looking up task_rq must be done under grq.lock to be safe.
 */
static void update_rq_clock_task(struct rq *rq, s64 delta);

static inline void update_rq_clock(struct rq *rq)
	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;

	update_rq_clock_task(rq, delta);

static inline bool task_running(struct task_struct *p)

static inline void grq_lock(void)
	raw_spin_lock(&grq.lock);

static inline void grq_unlock(void)
	raw_spin_unlock(&grq.lock);

static inline void grq_lock_irq(void)
	raw_spin_lock_irq(&grq.lock);

static inline void time_lock_grq(struct rq *rq)

static inline void grq_unlock_irq(void)
	raw_spin_unlock_irq(&grq.lock);

static inline void grq_lock_irqsave(unsigned long *flags)
	raw_spin_lock_irqsave(&grq.lock, *flags);

static inline void grq_unlock_irqrestore(unsigned long *flags)
	raw_spin_unlock_irqrestore(&grq.lock, *flags);
static inline struct rq
*task_grq_lock(struct task_struct *p, unsigned long *flags)
	grq_lock_irqsave(flags);

static inline struct rq
*time_task_grq_lock(struct task_struct *p, unsigned long *flags)
	struct rq *rq = task_grq_lock(p, flags);

static inline struct rq *task_grq_lock_irq(struct task_struct *p)

static inline void time_task_grq_lock_irq(struct task_struct *p)
	struct rq *rq = task_grq_lock_irq(p);

static inline void task_grq_unlock_irq(void)

static inline void task_grq_unlock(unsigned long *flags)
	grq_unlock_irqrestore(flags);

/*
 * grunqueue_is_locked
 *
 * Returns true if the global runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
bool grunqueue_is_locked(void)
	return raw_spin_is_locked(&grq.lock);

void grq_unlock_wait(void)
	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
	raw_spin_unlock_wait(&grq.lock);

static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
	local_irq_save(*flags);

static inline struct rq *__task_grq_lock(struct task_struct *p)

static inline void __task_grq_unlock(void)
/*
 * Look for any tasks *anywhere* that are running nice 0 or better. We do
 * this lockless for overhead reasons since the occasional wrong result
 * is harmless.
 */
bool above_background_load(void)
	for_each_online_cpu(cpu) {
		struct task_struct *cpu_curr = cpu_rq(cpu)->curr;

		if (unlikely(!cpu_curr))
		if (PRIO_TO_NICE(cpu_curr->static_prio) < 1) {
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	grq.lock.owner = current;
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * the previous owner.
	 */
	spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_);

#else /* __ARCH_WANT_UNLOCKED_CTXSW */

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW

#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

static inline bool deadline_before(u64 deadline, u64 time)
	return (deadline < time);

static inline bool deadline_after(u64 deadline, u64 time)
	return (deadline > time);
/*
 * A task that is queued but not running will be on the grq run list.
 * A task that is not running or queued will not be on the grq run list.
 * A task that is currently running will have ->on_cpu set but not on the
 * grq run list.
 */
static inline bool task_queued(struct task_struct *p)
	return (!list_empty(&p->run_list));

/*
 * Removing from the global runqueue. Enter with grq locked.
 */
static void dequeue_task(struct task_struct *p)
	list_del_init(&p->run_list);
	if (list_empty(grq.queue + p->prio))
		__clear_bit(p->prio, grq.prio_bitmap);

/*
 * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
 * an idle task, we ensure none of the following conditions are met.
 */
static bool idleprio_suitable(struct task_struct *p)
	return (!freezing(p) && !signal_pending(p) &&
		!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)));

/*
 * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
 * that the iso_refractory flag is not set.
 */
static bool isoprio_suitable(void)
	return !grq.iso_refractory;

/*
 * Adding to the global runqueue. Enter with grq locked.
 */
static void enqueue_task(struct task_struct *p)
	/* Check it hasn't gotten rt from PI */
	if ((idleprio_task(p) && idleprio_suitable(p)) ||
	    (iso_task(p) && isoprio_suitable()))
		p->prio = p->normal_prio;
	else
		p->prio = NORMAL_PRIO;
	__set_bit(p->prio, grq.prio_bitmap);
	list_add_tail(&p->run_list, grq.queue + p->prio);
	sched_info_queued(p);
/* Only idle task does this as a real time task */
static inline void enqueue_task_head(struct task_struct *p)
	__set_bit(p->prio, grq.prio_bitmap);
	list_add(&p->run_list, grq.queue + p->prio);
	sched_info_queued(p);

static inline void requeue_task(struct task_struct *p)
	sched_info_queued(p);

/*
 * Returns the relative length of deadline all compared to the shortest
 * deadline which is that of nice -20.
 */
static inline int task_prio_ratio(struct task_struct *p)
	return prio_ratios[TASK_USER_PRIO(p)];

/*
 * task_timeslice - all tasks of all priorities get the exact same timeslice
 * length. CPU distribution is handled by giving different deadlines to
 * tasks of different priorities. Use 128 as the base value for fast shifts.
 */
static inline int task_timeslice(struct task_struct *p)
	return (rr_interval * task_prio_ratio(p) / 128);
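/*
 * Illustrative arithmetic (added, not in the original; assumes prio_ratios[]
 * is seeded at 128 for nice -20, as done at init time elsewhere in this
 * file): a nice -20 task has task_prio_ratio() == 128, so
 * task_timeslice() == rr_interval * 128 / 128 == rr_interval; a priority
 * level whose ratio is 256 would yield twice that.
 */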
/*
 * qnr is the "queued but not running" count which is the total number of
 * tasks on the global runqueue list waiting for cpu time but not actually
 * currently running on a cpu.
 */
static inline void inc_qnr(void)

static inline void dec_qnr(void)

static inline int queued_notrunning(void)

/*
 * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
 * allow easy lookup of whether any suitable idle CPUs are available.
 * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the
 * idle_cpus variable than to do a full bitmask check when we are busy.
 */
static inline void set_cpuidle_map(int cpu)
	if (likely(cpu_online(cpu))) {
		cpu_set(cpu, grq.cpu_idle_map);
		grq.idle_cpus = true;

static inline void clear_cpuidle_map(int cpu)
	cpu_clear(cpu, grq.cpu_idle_map);
	if (cpus_empty(grq.cpu_idle_map))
		grq.idle_cpus = false;

static bool suitable_idle_cpus(struct task_struct *p)
	return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map));

#define CPUIDLE_DIFF_THREAD	(1)
#define CPUIDLE_DIFF_CORE	(2)
#define CPUIDLE_CACHE_BUSY	(4)
#define CPUIDLE_DIFF_CPU	(8)
#define CPUIDLE_THREAD_BUSY	(16)
#define CPUIDLE_DIFF_NODE	(32)

static void resched_task(struct task_struct *p);

/*
 * The best idle CPU is chosen according to the CPUIDLE ranking above where the
 * lowest value would give the most suitable CPU to schedule p onto next. The
 * order works out to be the following:
 *
 * Same core, idle or busy cache, idle or busy threads
 * Other core, same cache, idle or busy cache, idle threads.
 * Same node, other CPU, idle cache, idle threads.
 * Same node, other CPU, busy cache, idle threads.
 * Other core, same cache, busy threads.
 * Same node, other CPU, busy threads.
 * Other node, other CPU, idle cache, idle threads.
 * Other node, other CPU, busy cache, idle threads.
 * Other node, other CPU, busy threads.
 */
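/*
 * Illustrative example (added, not in the original): the flags OR together,
 * so an SMT sibling of the target CPU can score no worse than
 * CPUIDLE_DIFF_THREAD | CPUIDLE_CACHE_BUSY | CPUIDLE_THREAD_BUSY = 21,
 * whereas a CPU on another node starts at
 * CPUIDLE_DIFF_NODE | CPUIDLE_DIFF_CPU = 40 before its busyness is even
 * considered, so the lowest ranking (closest, least busy) CPU wins.
 */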
resched_best_mask(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
	unsigned int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
		CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
		CPUIDLE_DIFF_THREAD;

	if (cpu_isset(best_cpu, *tmpmask))

	for_each_cpu_mask(cpu_tmp, *tmpmask) {
		unsigned int ranking;

		tmp_rq = cpu_rq(cpu_tmp);

		if (rq->cpu_locality[cpu_tmp] > 3)
			ranking |= CPUIDLE_DIFF_NODE;
		if (rq->cpu_locality[cpu_tmp] > 2)
			ranking |= CPUIDLE_DIFF_CPU;
#ifdef CONFIG_SCHED_MC
		if (rq->cpu_locality[cpu_tmp] == 2)
			ranking |= CPUIDLE_DIFF_CORE;
		if (!(tmp_rq->cache_idle(cpu_tmp)))
			ranking |= CPUIDLE_CACHE_BUSY;
#ifdef CONFIG_SCHED_SMT
		if (rq->cpu_locality[cpu_tmp] == 1)
			ranking |= CPUIDLE_DIFF_THREAD;
		if (!(tmp_rq->siblings_idle(cpu_tmp)))
			ranking |= CPUIDLE_THREAD_BUSY;

		if (ranking < best_ranking) {
			best_ranking = ranking;

	resched_task(cpu_rq(best_cpu)->curr);
static void resched_best_idle(struct task_struct *p)
	cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map);
	resched_best_mask(task_cpu(p), task_rq(p), &tmpmask);

static inline void resched_suitable_idle(struct task_struct *p)
	if (suitable_idle_cpus(p))
		resched_best_idle(p);

/*
 * Flags to tell us whether this CPU is running a CPU frequency governor that
 * has slowed its speed or not. No locking required as the very rare wrongly
 * read value would be harmless.
 */
void cpu_scaling(int cpu)
	cpu_rq(cpu)->scaling = true;

void cpu_nonscaling(int cpu)
	cpu_rq(cpu)->scaling = false;

static inline bool scaling_rq(struct rq *rq)

static inline int locality_diff(struct task_struct *p, struct rq *rq)
	return rq->cpu_locality[task_cpu(p)];

#else /* CONFIG_SMP */
static inline void inc_qnr(void)

static inline void dec_qnr(void)

static inline int queued_notrunning(void)
	return grq.nr_running;

static inline void set_cpuidle_map(int cpu)

static inline void clear_cpuidle_map(int cpu)

static inline bool suitable_idle_cpus(struct task_struct *p)
	return uprq->curr == uprq->idle;

static inline void resched_suitable_idle(struct task_struct *p)

void cpu_scaling(int __unused)

void cpu_nonscaling(int __unused)

/*
 * Although CPUs can scale in UP, there is nowhere else for tasks to go so this
 * always returns 0.
 */
static inline bool scaling_rq(struct rq *rq)

static inline int locality_diff(struct task_struct *p, struct rq *rq)

#endif /* CONFIG_SMP */
EXPORT_SYMBOL_GPL(cpu_scaling);
EXPORT_SYMBOL_GPL(cpu_nonscaling);
/*
 * activate_idle_task - move idle task to the _front_ of runqueue.
 */
static inline void activate_idle_task(struct task_struct *p)
	enqueue_task_head(p);

static inline int normal_prio(struct task_struct *p)
	if (has_rt_policy(p))
		return MAX_RT_PRIO - 1 - p->rt_priority;
	if (idleprio_task(p))

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks as it will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;

/*
 * activate_task - move a task to the runqueue. Enter with grq locked.
 */
static void activate_task(struct task_struct *p, struct rq *rq)
	/*
	 * Sleep time is in units of nanosecs, so shift by 20 to get a
	 * milliseconds-range estimation of the amount of time that the task
	 * spent sleeping:
	 */
	if (unlikely(prof_on == SLEEP_PROFILING)) {
		if (p->state == TASK_UNINTERRUPTIBLE)
			profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
				     (rq->clock - p->last_ran) >> 20);

	p->prio = effective_prio(p);
	if (task_contributes_to_load(p))
		grq.nr_uninterruptible--;

static inline void clear_sticky(struct task_struct *p);
/*
 * deactivate_task - If it's running, it's not on the grq and we can just
 * decrement the nr_running. Enter with grq locked.
 */
static inline void deactivate_task(struct task_struct *p)
	if (task_contributes_to_load(p))
		grq.nr_uninterruptible++;

void set_task_cpu(struct task_struct *p, unsigned int cpu)
#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold grq lock.
	 */
	WARN_ON_ONCE(debug_locks && !lockdep_is_held(&grq.lock));
	trace_sched_migrate_task(p, cpu);
	if (task_cpu(p) != cpu)
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);

	/*
	 * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	task_thread_info(p)->cpu = cpu;

static inline void clear_sticky(struct task_struct *p)

static inline bool task_sticky(struct task_struct *p)
/* Reschedule the best idle CPU that is not this one. */
resched_closest_idle(struct rq *rq, int cpu, struct task_struct *p)
	cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map);
	cpu_clear(cpu, tmpmask);
	if (cpus_empty(tmpmask))
	resched_best_mask(cpu, rq, &tmpmask);

/*
 * We set the sticky flag on a task that is descheduled involuntarily meaning
 * it is awaiting further CPU time. If the last sticky task is still sticky
 * but unlucky enough to not be the next task scheduled, we unstick it and try
 * to find it an idle CPU. Realtime tasks do not stick to minimise their
 * latency at all times.
 */
swap_sticky(struct rq *rq, int cpu, struct task_struct *p)
	if (rq->sticky_task) {
		if (rq->sticky_task == p) {
		if (task_sticky(rq->sticky_task)) {
			clear_sticky(rq->sticky_task);
			resched_closest_idle(rq, cpu, rq->sticky_task);
		rq->sticky_task = p;
		resched_closest_idle(rq, cpu, p);
		rq->sticky_task = NULL;

static inline void unstick_task(struct rq *rq, struct task_struct *p)
	rq->sticky_task = NULL;

static inline void clear_sticky(struct task_struct *p)

static inline bool task_sticky(struct task_struct *p)

swap_sticky(struct rq *rq, int cpu, struct task_struct *p)

static inline void unstick_task(struct rq *rq, struct task_struct *p)
/*
 * Move a task off the global queue and take it to a cpu where it will
 * become the running task.
 */
static inline void take_task(int cpu, struct task_struct *p)
	set_task_cpu(p, cpu);

/*
 * Returns a descheduling task to the grq runqueue unless it is being
 * deactivated.
 */
static inline void return_task(struct task_struct *p, bool deactivate)

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)

static void resched_task(struct task_struct *p)
	assert_raw_spin_locked(&grq.lock);

	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))

	set_tsk_thread_flag(p, TIF_NEED_RESCHED);

	if (cpu == smp_processor_id())

	/* NEED_RESCHED must be visible before we test polling */
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);

static inline void resched_task(struct task_struct *p)
	assert_raw_spin_locked(&grq.lock);
	set_tsk_need_resched(p);

/*
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
	return cpu_curr(task_cpu(p)) == p;
struct migration_req {
	struct task_struct *task;

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
	unsigned long flags;
	bool running, on_rq;

	/*
	 * We do the initial early heuristics without holding
	 * any task-queue locks at all. We'll only try to get
	 * the runqueue lock when things look like they will
	 * work out! In the unlikely event rq is dereferenced
	 * since we're lockless, grab it again.
	 */
#else /* CONFIG_SMP */
	/*
	 * If the task is actively running on another CPU
	 * still, just relax and busy-wait without holding
	 * any locks.
	 *
	 * NOTE! Since we don't hold any locks, it's not
	 * even sure that "rq" stays as the right runqueue!
	 * But we don't care, since this will return false
	 * if the runqueue has changed and p is actually now
	 * running somewhere else!
	 */
	while (task_running(p) && p == rq->curr) {
		if (match_state && unlikely(p->state != match_state))

	/*
	 * Ok, time to look more closely! We need the grq
	 * lock now, to be *sure*. If we're wrong, we'll
	 * just go back and repeat.
	 */
	rq = task_grq_lock(p, &flags);
	trace_sched_wait_task(p);
	running = task_running(p);
	on_rq = task_queued(p);
	if (!match_state || p->state == match_state)
		ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
	task_grq_unlock(&flags);

	/*
	 * If it changed from the expected state, bail out now.
	 */
	if (unlikely(!ncsw))

	/*
	 * Was it really running after all now that we
	 * checked with the proper locks actually held?
	 *
	 * Oops. Go back and try again..
	 */
	if (unlikely(running)) {

	/*
	 * It's not enough that it's not actively running,
	 * it must be off the runqueue _entirely_, and not
	 * preempted!
	 *
	 * So if it was still runnable (but just not actively
	 * running right now), it's preempted, and we should
	 * yield - it could be a while.
	 */
	if (unlikely(on_rq)) {
		ktime_t to = ktime_set(0, NSEC_PER_SEC / HZ);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&to, HRTIMER_MODE_REL);

	/*
	 * Ahh, all good. It wasn't running, and it wasn't
	 * runnable, which means that it will never become
	 * running in the future either. We're all done!
	 */
/*
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);

EXPORT_SYMBOL_GPL(kick_process);

#define rq_idle(rq)	((rq)->rq_prio == PRIO_LIMIT)

/*
 * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
 * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or
 * between themselves, they cooperatively multitask. An idle rq scores as
 * prio PRIO_LIMIT so it is always preempted.
 */
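/*
 * Illustrative example (added, not in the original): a waking SCHED_NORMAL
 * task with a virtual deadline of 1000 (in niffies) may preempt a runqueue
 * whose current task has rq_deadline 1500, but not one at 900; RT tasks skip
 * the deadline comparison entirely and preempt on static priority alone.
 */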
can_preempt(struct task_struct *p, int prio, u64 deadline)
	/* Better static priority RT task or better policy preemption */

	/* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */
	if (!deadline_before(p->deadline, deadline))

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Check to see if there is a task that is affined only to offline CPUs but
 * still wants runtime. This happens to kernel threads during suspend/halt and
 * disabling of CPUs.
 */
static inline bool online_cpus(struct task_struct *p)
	return (likely(cpus_intersects(cpu_online_map, p->cpus_allowed)));

#else /* CONFIG_HOTPLUG_CPU */
/* All available CPUs are always online without hotplug. */
static inline bool online_cpus(struct task_struct *p)

/*
 * Check to see if p can run on cpu, and if not, whether there are any online
 * CPUs it can run on instead.
 */
static inline bool needs_other_cpu(struct task_struct *p, int cpu)
	if (unlikely(!cpu_isset(cpu, p->cpus_allowed)))

/*
 * When all else is equal, still prefer this_rq.
 */
static void try_preempt(struct task_struct *p, struct rq *this_rq)
	struct rq *highest_prio_rq = NULL;
	int cpu, highest_prio;
	u64 latest_deadline;

	/*
	 * We clear the sticky flag here because for a task to have called
	 * try_preempt with the sticky flag enabled means some complicated
	 * re-scheduling has occurred and we should ignore the sticky flag.
	 */

	if (suitable_idle_cpus(p)) {
		resched_best_idle(p);

	/* IDLEPRIO tasks never preempt anything but idle */
	if (p->policy == SCHED_IDLEPRIO)

	if (likely(online_cpus(p)))
		cpus_and(tmp, cpu_online_map, p->cpus_allowed);

	highest_prio = latest_deadline = 0;

	for_each_cpu_mask(cpu, tmp) {

		rq_prio = rq->rq_prio;
		if (rq_prio < highest_prio)

		if (rq_prio > highest_prio ||
		    deadline_after(rq->rq_deadline, latest_deadline)) {
			latest_deadline = rq->rq_deadline;
			highest_prio = rq_prio;
			highest_prio_rq = rq;

	if (likely(highest_prio_rq)) {
		if (can_preempt(p, highest_prio, highest_prio_rq->rq_deadline))
			resched_task(highest_prio_rq->curr);

#else /* CONFIG_SMP */
static inline bool needs_other_cpu(struct task_struct *p, int cpu)

static void try_preempt(struct task_struct *p, struct rq *this_rq)
	if (p->policy == SCHED_IDLEPRIO)
	if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
		resched_task(uprq->curr);

#endif /* CONFIG_SMP */
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

	int this_cpu = smp_processor_id();

	if (cpu == this_cpu)
		schedstat_inc(rq, ttwu_local);
		struct sched_domain *sd;

		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
#endif /* CONFIG_SCHEDSTATS */

static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
				 bool is_sync)
	activate_task(p, rq);

	/*
	 * Sync wakeups (i.e. those types of wakeups where the waker
	 * has indicated that it will leave the CPU in short order)
	 * don't trigger a preemption if there are no idle cpus,
	 * instead waiting for current to deschedule.
	 */
	if (!is_sync || suitable_idle_cpus(p))

static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
					bool success)
	trace_sched_wakeup(p, success);
	p->state = TASK_RUNNING;

	/*
	 * if a worker is waking up, notify workqueue. Note that on BFS, we
	 * don't really know what cpu it will be, so we fake it for
	 * wq_worker_waking_up :/
	 */
	if ((p->flags & PF_WQ_WORKER) && success)
		wq_worker_waking_up(p, cpu_of(rq));

void scheduler_ipi(void)

#endif /* CONFIG_SMP */
/*
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static bool try_to_wake_up(struct task_struct *p, unsigned int state,
			   int wake_flags)
	bool success = false;
	unsigned long flags;

	/* This barrier is undocumented, probably for p->state? Damn. */

	/*
	 * No need to do time_lock_grq as we only need to update the rq clock
	 * if we activate the task
	 */
	rq = task_grq_lock(p, &flags);

	/* state is a volatile long, why that is, I don't know */
	if (!((unsigned int)p->state & state))

	if (task_queued(p) || task_running(p))

	ttwu_activate(p, rq, wake_flags & WF_SYNC);

	ttwu_post_activation(p, rq, success);

	task_grq_unlock(&flags);

	ttwu_stat(p, cpu, wake_flags);

/*
 * try_to_wake_up_local - try to wake up a local task with grq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that grq is locked and @p is not the current task.
 * grq stays locked over invocation.
 */
static void try_to_wake_up_local(struct task_struct *p)
	struct rq *rq = task_rq(p);
	bool success = false;

	lockdep_assert_held(&grq.lock);

	if (!(p->state & TASK_NORMAL))

	if (!task_queued(p)) {
		if (likely(!task_running(p))) {
			schedstat_inc(rq, ttwu_count);
			schedstat_inc(rq, ttwu_local);
		ttwu_activate(p, rq, false);
		ttwu_stat(p, smp_processor_id(), 0);

	ttwu_post_activation(p, rq, success);

/*
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes. Returns 1 if the process was woken up, 0 if it was already
 * running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
	return try_to_wake_up(p, TASK_ALL, 0);

EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
	return try_to_wake_up(p, state, 0);
static void time_slice_expired(struct task_struct *p);

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 */
void sched_fork(struct task_struct *p)
	struct task_struct *curr;
	int cpu = get_cpu();

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);

	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;
	set_task_cpu(p, cpu);

	/* Should be reset in fork.c but done here for ease of bfs patching */
	p->sched_time = p->stime_pc = p->utime_pc = 0;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
			p->policy = SCHED_NORMAL;
			p->normal_prio = normal_prio(p);

		if (PRIO_TO_NICE(p->static_prio) < 0) {
			p->static_prio = NICE_TO_PRIO(0);
			p->normal_prio = p->static_prio;

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = curr->normal_prio;

	INIT_LIST_HEAD(&p->run_list);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (unlikely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));

#ifdef CONFIG_PREEMPT_COUNT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;

	if (unlikely(p->policy == SCHED_FIFO))

	/*
	 * Share the timeslice between parent and child, thus the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness. If it's negative, it won't
	 * matter since that's the same as being 0. current's time_slice is
	 * actually in rq_time_slice when it's running, as is its last_ran
	 * value. rq->rq_deadline is only modified within schedule() so it
	 * is always equal to current->deadline.
	 */
	rq = task_grq_lock_irq(curr);
	if (likely(rq->rq_time_slice >= RESCHED_US * 2)) {
		rq->rq_time_slice /= 2;
		p->time_slice = rq->rq_time_slice;
		/*
		 * Forking task has run out of timeslice. Reschedule it and
		 * start its child with a new time slice and deadline. The
		 * child will end up running first because its deadline will
		 * be slightly earlier.
		 */
		rq->rq_time_slice = 0;
		set_tsk_need_resched(curr);
		time_slice_expired(p);
	p->last_ran = rq->rq_last_ran;
	task_grq_unlock_irq();
/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
	struct task_struct *parent;
	unsigned long flags;

	rq = task_grq_lock(p, &flags);
	p->state = TASK_RUNNING;

	/* Unnecessary but small chance that the parent changed CPU */
	set_task_cpu(p, task_cpu(parent));
	activate_task(p, rq);
	trace_sched_wakeup_new(p, 1);
	if (rq->curr == parent && !suitable_idle_cpus(p)) {
		/*
		 * The VM isn't cloned, so we're in a good position to
		 * do child-runs-first in anticipation of an exec. This
		 * usually avoids a lot of COW overhead.
		 */
		resched_task(parent);
	task_grq_unlock(&flags);
#ifdef CONFIG_PREEMPT_NOTIFIERS

/*
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
	hlist_add_head(&notifier->link, &current->preempt_notifiers);

EXPORT_SYMBOL_GPL(preempt_notifier_register);

/*
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
	hlist_del(&notifier->link);

EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());

fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)

fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)

#endif /* CONFIG_PREEMPT_NOTIFIERS */
/*
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
	sched_info_switch(prev, next);
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
	trace_sched_switch(prev, next);

/*
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(grq.lock)
	struct mm_struct *mm = rq->prev_mm;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	finish_arch_switch(prev);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
	perf_event_task_sched_in(prev, current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
	finish_lock_switch(rq, prev);

	fire_sched_in_preempt_notifiers(current);

	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
/*
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(grq.lock)
	struct rq *rq = this_rq();

	finish_task_switch(rq, prev);
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */

	if (current->set_child_tid)
		put_user(current->pid, current->set_child_tid);

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
		switch_mm(oldmm, mm, next);

		prev->active_mm = NULL;
		rq->prev_mm = oldmm;

	/*
	 * The runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&grq.lock.dep_map, 1, _THIS_IP_);

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
/*
 * nr_running, nr_uninterruptible and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, current number of uninterruptible-sleeping threads, total
 * number of context switches performed since bootup. All are measured
 * without grabbing the grq lock but the occasional inaccurate result
 * doesn't matter so long as it's positive.
 */
unsigned long nr_running(void)
	long nr = grq.nr_running;

	if (unlikely(nr < 0))
	return (unsigned long)nr;

unsigned long nr_uninterruptible(void)
	long nu = grq.nr_uninterruptible;

	if (unlikely(nu < 0))

unsigned long long nr_context_switches(void)
	long long ns = grq.nr_switches;

	/* This is of course impossible */
	if (unlikely(ns < 0))
	return (unsigned long long)ns;

unsigned long nr_iowait(void)
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

unsigned long nr_iowait_cpu(int cpu)
	struct rq *this = cpu_rq(cpu);
	return atomic_read(&this->nr_iowait);

unsigned long nr_active(void)
	return nr_running() + nr_uninterruptible();

/* Beyond a task running on this CPU, load is equal everywhere on BFS */
unsigned long this_cpu_load(void)
	return this_rq()->rq_running +
		((queued_notrunning() + nr_uninterruptible()) / grq.noc);
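/*
 * Illustrative arithmetic (added, not in the original): with one task running
 * on this CPU, six tasks queued globally, two uninterruptible sleepers and
 * four online CPUs, this_cpu_load() reports 1 + (6 + 2) / 4 = 3.
 */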
/* Variables and functions for calc_load */
static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun);

/*
 * get_avenrun - get the load average array
 * @loads:	pointer to dest load array
 * @offset:	offset to add
 * @shift:	shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
	loads[0] = (avenrun[0] + offset) << shift;
	loads[1] = (avenrun[1] + offset) << shift;
	loads[2] = (avenrun[2] + offset) << shift;

static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
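/*
 * Illustrative arithmetic (added, not in the original; the elided first step
 * of calc_load() scales the old value by exp before the line above adds the
 * active contribution): with the usual FSHIFT of 11, FIXED_1 of 2048 and
 * EXP_1 of roughly 1884, a previously idle system with one runnable task
 * (active == 2048) moves to (0 * 1884 + 2048 * (2048 - 1884)) >> 11 = 164,
 * i.e. a 1-minute loadavg of about 0.08 after the first LOAD_FREQ update.
 */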
/*
 * calc_load - update the avenrun load estimates every LOAD_FREQ seconds.
 */
void calc_global_load(unsigned long ticks)
	if (time_before(jiffies, calc_load_update))

	active = nr_active() * FIXED_1;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	calc_load_update = jiffies + LOAD_FREQ;

DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in account_system_vtime, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in other CPU reading this CPU's irq time and can
 * race with irq/account_system_vtime on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
	sched_clock_irqtime = 1;

void disable_sched_clock_irqtime(void)
	sched_clock_irqtime = 0;

#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
	__this_cpu_inc(irq_time_seq.sequence);

static inline void irq_time_write_end(void)
	__this_cpu_inc(irq_time_seq.sequence);

static inline u64 irq_time_read(int cpu)
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)

static inline void irq_time_write_end(void)

static inline u64 irq_time_read(int cpu)
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);

#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void account_system_vtime(struct task_struct *curr)
	unsigned long flags;

	if (!sched_clock_irqtime)

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to ksoftirqd thread
	 * in that case, so as not to confuse scheduler with a special task
	 * that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);

EXPORT_SYMBOL_GPL(account_system_vtime);

#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2241 #ifdef CONFIG_PARAVIRT
2242 static inline u64
steal_ticks(u64 steal
)
2244 if (unlikely(steal
> NSEC_PER_SEC
))
2245 return div_u64(steal
, TICK_NSEC
);
2247 return __iter_div_u64_rem(steal
, TICK_NSEC
, &steal
);
2251 static void update_rq_clock_task(struct rq
*rq
, s64 delta
)
2253 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2254 s64 irq_delta
= irq_time_read(cpu_of(rq
)) - rq
->prev_irq_time
;
2257 * Since irq_time is only updated on {soft,}irq_exit, we might run into
2258 * this case when a previous update_rq_clock() happened inside a
2259 * {soft,}irq region.
2261 * When this happens, we stop ->clock_task and only update the
2262 * prev_irq_time stamp to account for the part that fit, so that a next
2263 * update will consume the rest. This ensures ->clock_task is
2266 * It does however cause some slight miss-attribution of {soft,}irq
2267 * time, a more accurate solution would be to update the irq_time using
2268 * the current rq->clock timestamp, except that would require using
2271 if (irq_delta
> delta
)
2274 rq
->prev_irq_time
+= irq_delta
;
2277 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
2278 if (static_branch((¶virt_steal_rq_enabled
))) {
2279 u64 st
, steal
= paravirt_steal_clock(cpu_of(rq
));
2281 steal
-= rq
->prev_steal_time_rq
;
2283 if (unlikely(steal
> delta
))
2286 st
= steal_ticks(steal
);
2287 steal
= st
* TICK_NSEC
;
2289 rq
->prev_steal_time_rq
+= steal
;
2295 rq
->clock_task
+= delta
;
2298 #ifndef nsecs_to_cputime
2299 # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
2302 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2303 static void irqtime_account_hi_si(void)
2305 u64
*cpustat
= kcpustat_this_cpu
->cpustat
;
2308 latest_ns
= nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time
));
2309 if (latest_ns
> cpustat
[CPUTIME_IRQ
])
2310 cpustat
[CPUTIME_IRQ
] += (__force u64
)cputime_one_jiffy
;
2312 latest_ns
= nsecs_to_cputime64(this_cpu_read(cpu_softirq_time
));
2313 if (latest_ns
> cpustat
[CPUTIME_SOFTIRQ
])
2314 cpustat
[CPUTIME_SOFTIRQ
] += (__force u64
)cputime_one_jiffy
;
2316 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
2318 #define sched_clock_irqtime (0)
2320 static inline void irqtime_account_hi_si(void)
2323 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_branch(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}
/*
 * On each tick, see what percentage of that tick was attributed to each
 * component and add the percentage to the _pc values. Once a _pc value has
 * accumulated one tick's worth, account for that. This means the total
 * percentage of load components will always be 128 (pseudo 100) per tick.
 */
static void pc_idle_time(struct rq *rq, unsigned long pc)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (atomic_read(&rq->nr_iowait) > 0) {
		rq->iowait_pc += pc;
		if (rq->iowait_pc >= 128) {
			rq->iowait_pc %= 128;
			cpustat[CPUTIME_IOWAIT] += (__force u64)cputime_one_jiffy;
		}
	} else {
		rq->idle_pc += pc;
		if (rq->idle_pc >= 128) {
			rq->idle_pc %= 128;
			cpustat[CPUTIME_IDLE] += (__force u64)cputime_one_jiffy;
		}
	}
}

static void
pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset,
	       unsigned long pc, unsigned long ns)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);

	p->stime_pc += pc;
	if (p->stime_pc >= 128) {
		p->stime_pc %= 128;
		p->stime += (__force u64)cputime_one_jiffy;
		p->stimescaled += one_jiffy_scaled;
		account_group_system_time(p, cputime_one_jiffy);
		acct_update_integrals(p);
	}
	p->sched_time += ns;

	if (hardirq_count() - hardirq_offset) {
		rq->irq_pc += pc;
		if (rq->irq_pc >= 128) {
			rq->irq_pc %= 128;
			cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy;
		}
	} else if (in_serving_softirq()) {
		rq->softirq_pc += pc;
		if (rq->softirq_pc >= 128) {
			rq->softirq_pc %= 128;
			cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy;
		}
	} else {
		rq->system_pc += pc;
		if (rq->system_pc >= 128) {
			rq->system_pc %= 128;
			cpustat[CPUTIME_SYSTEM] += (__force u64)cputime_one_jiffy;
		}
	}
}

static void pc_user_time(struct rq *rq, struct task_struct *p,
			 unsigned long pc, unsigned long ns)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);

	p->utime_pc += pc;
	if (p->utime_pc >= 128) {
		p->utime_pc %= 128;
		p->utime += (__force u64)cputime_one_jiffy;
		p->utimescaled += one_jiffy_scaled;
		account_group_user_time(p, cputime_one_jiffy);
		acct_update_integrals(p);
	}
	p->sched_time += ns;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time do not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 */
		rq->softirq_pc += pc;
		if (rq->softirq_pc >= 128) {
			rq->softirq_pc %= 128;
			cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy;
		}
	}

	if (TASK_NICE(p) > 0 || idleprio_task(p)) {
		rq->nice_pc += pc;
		if (rq->nice_pc >= 128) {
			rq->nice_pc %= 128;
			cpustat[CPUTIME_NICE] += (__force u64)cputime_one_jiffy;
		}
	} else {
		rq->user_pc += pc;
		if (rq->user_pc >= 128) {
			rq->user_pc %= 128;
			cpustat[CPUTIME_USER] += (__force u64)cputime_one_jiffy;
		}
	}
}

/*
 * Convert nanoseconds to pseudo percentage of one tick. Use 128 for fast
 * shifts instead of 100.
 */
#define NS_TO_PC(NS)	(NS * 128 / JIFFY_NS)
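
/*
 * Worked example (illustrative, not part of the original source; assumes
 * HZ=1000 so JIFFY_NS is 1,000,000 ns): a 250,000 ns slice maps to
 * NS_TO_PC(250000) = 250000 * 128 / 1000000 = 32, one quarter of a tick on
 * the pseudo-percentage scale where 128 equals a full tick. Four such
 * slices accumulate a _pc counter of 128, at which point the routines above
 * charge one whole jiffy to the matching cpustat bucket and wrap the
 * counter with %= 128, so nothing is lost or double-counted.
 */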
/*
 * This is called on clock ticks and on context switches.
 * Bank in p->sched_time the ns elapsed since the last tick or switch.
 * CPU scheduler quota accounting is also performed here in microseconds.
 */
static void
update_cpu_clock(struct rq *rq, struct task_struct *p, bool tick)
{
	long account_ns = rq->clock - rq->timekeep_clock;
	struct task_struct *idle = rq->idle;
	unsigned long account_pc;

	if (unlikely(account_ns < 0))
		goto ts_account;

	account_pc = NS_TO_PC(account_ns);

	if (tick) {
		int user_tick;

		/* Accurate tick timekeeping */
		rq->account_pc += account_pc - 128;
		if (rq->account_pc < 0) {
			/*
			 * Small errors in micro accounting may not make the
			 * accounting add up to 128 each tick so we keep track
			 * of the percentage and round it up when less than 128
			 */
			account_pc += -rq->account_pc;
			rq->account_pc = 0;
		}
		if (steal_account_process_tick())
			goto ts_account;

		user_tick = user_mode(get_irq_regs());

		if (user_tick)
			pc_user_time(rq, p, account_pc, account_ns);
		else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
			pc_system_time(rq, p, HARDIRQ_OFFSET,
				       account_pc, account_ns);
		else
			pc_idle_time(rq, account_pc);

		if (sched_clock_irqtime)
			irqtime_account_hi_si();
	} else {
		/* Accurate subtick timekeeping */
		rq->account_pc += account_pc;
		if (p == idle)
			pc_idle_time(rq, account_pc);
		else
			pc_user_time(rq, p, account_pc, account_ns);
	}

ts_account:
	/* time_slice accounting is done in usecs to avoid overflow on 32bit */
	if (rq->rq_policy != SCHED_FIFO && p != idle) {
		s64 time_diff = rq->clock - rq->rq_last_ran;

		niffy_diff(&time_diff, 1);
		rq->rq_time_slice -= NS_TO_US(time_diff);
	}

	rq->rq_last_ran = rq->timekeep_clock = rq->clock;
}
/*
 * Return any ns on the sched_clock that have not yet been accounted in
 * @p in case that task is currently running.
 *
 * Called with task_grq_lock() held.
 */
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	if (p == rq->curr) {
		ns = rq->clock_task - rq->rq_last_ran;
		if (unlikely((s64)ns < 0))
			ns = 0;
	}

	return ns;
}

unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns;

	rq = task_grq_lock(p, &flags);
	ns = do_task_delta_exec(p, rq);
	task_grq_unlock(&flags);

	return ns;
}

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that have not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns;

	rq = task_grq_lock(p, &flags);
	ns = p->sched_time + do_task_delta_exec(p, rq);
	task_grq_unlock(&flags);

	return ns;
}

/* Compatibility crap for removal */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
}

void account_idle_time(cputime_t cputime)
{
}
/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += (__force u64)cputime;
	p->utimescaled += (__force u64)cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += (__force u64)cputime;

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64)cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64)cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64)cputime;
		cpustat[CPUTIME_GUEST] += (__force u64)cputime;
	}
}
/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @target_cputime64: pointer to cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, cputime64_t *target_cputime64)
{
	/* Add system time to process. */
	p->stime += (__force u64)cputime;
	p->stimescaled += (__force u64)cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	*target_cputime64 += (__force u64)cputime;

	/* Account for system time used */
	acct_update_integrals(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * This is for guest only now.
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
		account_guest_time(p, cputime, cputime_scaled);
}

/*
 * Account for involuntary wait time.
 * @steal: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64)cputime;
}
/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
static void account_idle_times(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64)cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64)cputime;
}

#ifndef CONFIG_VIRT_CPU_ACCOUNTING

void account_process_tick(struct task_struct *p, int user_tick)
{
}

/*
 * Account multiple ticks of steal time.
 * @p: the process from which the cpu time has been stolen
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	account_idle_times(jiffies_to_cputime(ticks));
}
#endif
static inline void grq_iso_lock(void)
	__acquires(grq.iso_lock)
{
	raw_spin_lock(&grq.iso_lock);
}

static inline void grq_iso_unlock(void)
	__releases(grq.iso_lock)
{
	raw_spin_unlock(&grq.iso_lock);
}

/*
 * Functions to test for when SCHED_ISO tasks have used their allocated
 * quota as real time scheduling and convert them back to SCHED_NORMAL.
 * Where possible, the data is tested lockless, to avoid grabbing iso_lock
 * because the occasional inaccurate result won't matter. However the
 * tick data is only ever modified under lock. iso_refractory is only simply
 * set to 0 or 1 so it's not worth grabbing the lock yet again for that.
 */
static bool set_iso_refractory(void)
{
	grq.iso_refractory = true;
	return grq.iso_refractory;
}

static bool clear_iso_refractory(void)
{
	grq.iso_refractory = false;
	return grq.iso_refractory;
}

/*
 * Test if SCHED_ISO tasks have run longer than their alloted period as RT
 * tasks and set the refractory flag if necessary. There is 10% hysteresis
 * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
 * divide.
 */
static bool test_ret_isorefractory(struct rq *rq)
{
	if (likely(!grq.iso_refractory)) {
		if (grq.iso_ticks > ISO_PERIOD * sched_iso_cpu)
			return set_iso_refractory();
	} else {
		if (grq.iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128))
			return clear_iso_refractory();
	}
	return grq.iso_refractory;
}
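
/*
 * Worked example (illustrative, not part of the original source; assumes
 * the usual BFS default of sched_iso_cpu = 70): the refractory flag is set
 * once iso_ticks exceeds ISO_PERIOD * 70 and is only cleared again once it
 * decays below ISO_PERIOD * (70 * 115 / 128) = ISO_PERIOD * 62, i.e. about
 * 63%. That ~10% band keeps the flag from flapping when SCHED_ISO load
 * hovers right around the configured cap.
 */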
static void iso_tick(void)
{
	grq_iso_lock();
	grq.iso_ticks += 100;
	grq_iso_unlock();
}

/* No SCHED_ISO task was running so decrease rq->iso_ticks */
static inline void no_iso_tick(void)
{
	if (grq.iso_ticks) {
		grq_iso_lock();
		grq.iso_ticks -= grq.iso_ticks / ISO_PERIOD + 1;
		if (unlikely(grq.iso_refractory && grq.iso_ticks <
		    ISO_PERIOD * (sched_iso_cpu * 115 / 128)))
			clear_iso_refractory();
		grq_iso_unlock();
	}
}

/* This manages tasks that have run out of timeslice during a scheduler_tick */
static void task_running_tick(struct rq *rq)
{
	struct task_struct *p;

	/*
	 * If a SCHED_ISO task is running we increment the iso_ticks. In
	 * order to prevent SCHED_ISO tasks from causing starvation in the
	 * presence of true RT tasks we account those as iso_ticks as well.
	 */
	if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) {
		if (grq.iso_ticks <= (ISO_PERIOD * 128) - 128)
			iso_tick();
	} else
		no_iso_tick();

	if (iso_queue(rq)) {
		if (unlikely(test_ret_isorefractory(rq))) {
			if (rq_running_iso(rq)) {
				/*
				 * SCHED_ISO task is running as RT and limit
				 * has been hit. Force it to reschedule as
				 * SCHED_NORMAL by zeroing its time_slice
				 */
				rq->rq_time_slice = 0;
			}
		}
	}

	/* SCHED_FIFO tasks never run out of timeslice. */
	if (rq->rq_policy == SCHED_FIFO)
		return;
	/*
	 * Tasks that were scheduled in the first half of a tick are not
	 * allowed to run into the 2nd half of the next tick if they will
	 * run out of time slice in the interim. Otherwise, if they have
	 * less than RESCHED_US μs of time slice left they will be rescheduled.
	 */
	if (rq->dither) {
		if (rq->rq_time_slice > HALF_JIFFY_US)
			return;
		else
			rq->rq_time_slice = 0;
	} else if (rq->rq_time_slice >= RESCHED_US)
		return;

	/* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */
	p = rq->curr;
	grq_lock();
	set_tsk_need_resched(p);
	grq_unlock();
}
void wake_up_idle_cpu(int cpu);

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled. The data modified is all
 * local to struct rq so we don't need to grab grq lock.
 */
void scheduler_tick(void)
{
	int cpu __maybe_unused = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);

	/* grq lock not grabbed, so only update rq clock */
	update_rq_clock(rq);
	update_cpu_clock(rq, rq->curr, true);
	task_running_tick(rq);
	rq->last_tick = rq->clock;
	perf_event_task_tick();
}

notrace unsigned long get_parent_ip(unsigned long addr)
{
	if (in_lock_functions(addr)) {
		addr = CALLER_ADDR2;
		if (in_lock_functions(addr))
			addr = CALLER_ADDR3;
	}
	return addr;
}
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_PREEMPT_TRACER))

void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	preempt_count() += val;
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	if (preempt_count() == val)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);

void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);
#endif
/*
 * Deadline is "now" in niffies + (offset by priority). Setting the deadline
 * is the key to everything. It distributes cpu fairly amongst tasks of the
 * same nice value, it proportions cpu according to nice level, it means the
 * task that last woke up the longest ago has the earliest deadline, thus
 * ensuring that interactive tasks get low latency on wake up. The CPU
 * proportion works out to the square of the virtual deadline difference, so
 * this equation will give nice 19 3% CPU compared to nice 0.
 */
static inline u64 prio_deadline_diff(int user_prio)
{
	return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
}

static inline u64 task_deadline_diff(struct task_struct *p)
{
	return prio_deadline_diff(TASK_USER_PRIO(p));
}

static inline u64 static_deadline_diff(int static_prio)
{
	return prio_deadline_diff(USER_PRIO(static_prio));
}

static inline int longest_deadline_diff(void)
{
	return prio_deadline_diff(39);
}

static inline int ms_longest_deadline_diff(void)
{
	return NS_TO_MS(longest_deadline_diff());
}
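
/*
 * Worked example (illustrative, not part of the original source; assumes
 * prio_ratios[] is seeded at 128 and grows ~10% per nice level, as set up
 * elsewhere in this file): nice 0 is user_prio 20, so its deadline offset is
 * roughly 1.1^20 ~= 6.7 rr_intervals; nice 19 is user_prio 39, roughly
 * 1.1^39 ~= 41 rr_intervals. The offsets differ by a factor of about 6, and
 * since CPU share works out to the square of the virtual deadline
 * difference, nice 19 ends up with about 1/36 ~= 3% of the CPU a nice 0
 * task gets, matching the comment above.
 */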
/*
 * The time_slice is only refilled when it is empty and that is when we set a
 * new deadline.
 */
static void time_slice_expired(struct task_struct *p)
{
	p->time_slice = timeslice();
	p->deadline = grq.niffies + task_deadline_diff(p);
}

/*
 * Timeslices below RESCHED_US are considered as good as expired as there's no
 * point rescheduling when there's so little time left. SCHED_BATCH tasks
 * have been flagged as not latency sensitive and likely to be fully CPU
 * bound so every time they're rescheduled they have their time_slice
 * refilled, but get a new later deadline to have little effect on
 * SCHED_NORMAL tasks.
 */
static inline void check_deadline(struct task_struct *p)
{
	if (p->time_slice < RESCHED_US || batch_task(p))
		time_slice_expired(p);
}
#define BITOP_WORD(nr)	((nr) / BITS_PER_LONG)

/*
 * Scheduler queue bitmap specific find next bit.
 */
static inline unsigned long
next_sched_bit(const unsigned long *addr, unsigned long offset)
{
	const unsigned long *p;
	unsigned long result;
	unsigned long size;
	unsigned long tmp;

	size = PRIO_LIMIT;
	if (offset >= size)
		return size;

	p = addr + BITOP_WORD(offset);
	result = offset & ~(BITS_PER_LONG-1);
	size -= result;
	offset %= BITS_PER_LONG;
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
/*
 * O(n) lookup of all tasks in the global runqueue. The real brainfuck
 * of lock contention and O(n). It's not really O(n) as only the queued,
 * but not running tasks are scanned, and is O(n) queued in the worst case
 * scenario only because the right task can be found before scanning all of
 * the queued tasks.
 *
 * Tasks are selected in this order:
 * Real time tasks are selected purely by their static priority and in the
 * order they were queued, so the lowest value idx, and the first queued task
 * of that priority value is chosen.
 * If no real time tasks are found, the SCHED_ISO priority is checked, and
 * all SCHED_ISO tasks have the same priority value, so they're selected by
 * the earliest deadline value.
 * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the
 * earliest deadline.
 * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are
 * selected by the earliest deadline.
 */
static inline struct
task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
{
	struct task_struct *edt = NULL;
	unsigned long idx = -1;

	do {
		struct list_head *queue;
		struct task_struct *p;
		u64 earliest_deadline;

		idx = next_sched_bit(grq.prio_bitmap, ++idx);
		if (idx >= PRIO_LIMIT)
			return idle;
		queue = grq.queue + idx;

		if (idx < MAX_RT_PRIO) {
			/* We found an rt task */
			list_for_each_entry(p, queue, run_list) {
				/* Make sure cpu affinity is ok */
				if (needs_other_cpu(p, cpu))
					continue;
				edt = p;
				goto out_take;
			}
			/*
			 * None of the RT tasks at this priority can run on
			 * this cpu
			 */
			continue;
		}

		/*
		 * No rt tasks. Find the earliest deadline task. Now we're in
		 * O(n) territory.
		 */
		earliest_deadline = ~0ULL;
		list_for_each_entry(p, queue, run_list) {
			u64 dl;

			/* Make sure cpu affinity is ok */
			if (needs_other_cpu(p, cpu))
				continue;

			/*
			 * Soft affinity happens here by not scheduling a task
			 * with its sticky flag set that ran on a different CPU
			 * last when the CPU is scaling, or by greatly biasing
			 * against its deadline when not, based on cpu cache
			 * locality.
			 */
			if (task_sticky(p) && task_rq(p) != rq) {
				if (scaling_rq(rq))
					continue;
				dl = p->deadline << locality_diff(p, rq);
			} else
				dl = p->deadline;

			if (deadline_before(dl, earliest_deadline)) {
				earliest_deadline = dl;
				edt = p;
			}
		}
	} while (!edt);

out_take:
	take_task(cpu, edt);
	return edt;
}
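
/*
 * Selection sketch (illustrative, not part of the original source): with one
 * task queued at RT priority 10, one at ISO_PRIO and one at a normal nice-0
 * priority, the bitmap scan above returns idx 10 first, so the RT task wins
 * outright and deadlines are never compared. Only when no RT/ISO bits are
 * set does the scan fall through to the normal queue, where the smallest
 * (earliest) virtual deadline - possibly weighted by locality_diff() for
 * sticky tasks - decides which task runs next.
 */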
/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	struct pt_regs *regs = get_irq_regs();

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	if (irqs_disabled())
		print_irqtrace_events(prev);

	if (regs)
		show_regs(regs);
	else
		dump_stack();
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
		__schedule_bug(prev);

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
}
/*
 * The currently running task's information is all stored in rq local data
 * which is only modified by the local CPU, thereby allowing the data to be
 * changed without grabbing the grq lock.
 */
static inline void set_rq_task(struct rq *rq, struct task_struct *p)
{
	rq->rq_time_slice = p->time_slice;
	rq->rq_deadline = p->deadline;
	rq->rq_last_ran = p->last_ran = rq->clock;
	rq->rq_policy = p->policy;
	rq->rq_prio = p->prio;
	if (p != rq->idle)
		rq->rq_running = true;
	else
		rq->rq_running = false;
}

static void reset_rq_task(struct rq *rq, struct task_struct *p)
{
	rq->rq_policy = p->policy;
	rq->rq_prio = p->prio;
}
/*
 * schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
{
	struct task_struct *prev, *next, *idle;
	unsigned long *switch_count;
	bool deactivate;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	deactivate = false;
	schedule_debug(prev);

	grq_lock_irq();

	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			deactivate = true;
			/*
			 * If a worker is going to sleep, notify and
			 * ask workqueue whether it wants to wake up a
			 * task to maintain concurrency. If so, wake
			 * up the task.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup) {
					/* This shouldn't happen, but does */
					if (unlikely(to_wakeup == prev))
						deactivate = false;
					else
						try_to_wake_up_local(to_wakeup);
				}
			}
		}
		switch_count = &prev->nvcsw;
	}

	/*
	 * If we are going to sleep and we have plugged IO queued, make
	 * sure to submit it to avoid deadlocks.
	 */
	if (unlikely(deactivate && blk_needs_flush_plug(prev))) {
		grq_unlock_irq();
		preempt_enable_no_resched();
		blk_schedule_flush_plug(prev);
		goto need_resched;
	}

	update_cpu_clock(rq, prev, false);
	if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
		rq->dither = false;
	else
		rq->dither = true;

	clear_tsk_need_resched(prev);

	idle = rq->idle;
	if (idle != prev) {
		/* Update all the information stored on struct rq */
		prev->time_slice = rq->rq_time_slice;
		prev->deadline = rq->rq_deadline;
		check_deadline(prev);
		prev->last_ran = rq->clock;

		/* Task changed affinity off this CPU */
		if (needs_other_cpu(prev, cpu))
			resched_suitable_idle(prev);
		else if (!deactivate) {
			if (!queued_notrunning()) {
				/*
				 * We now know prev is the only thing that is
				 * awaiting CPU so we can bypass rechecking for
				 * the earliest deadline task and just run it
				 * again.
				 */
				set_rq_task(rq, prev);
				grq_unlock_irq();
				goto rerun_prev_unlocked;
			} else
				swap_sticky(rq, cpu, prev);
		}
		return_task(prev, deactivate);
	}

	if (unlikely(!queued_notrunning())) {
		/*
		 * This CPU is now truly idle as opposed to when idle is
		 * scheduled as a high priority task in its own right.
		 */
		next = idle;
		schedstat_inc(rq, sched_goidle);
		set_cpuidle_map(cpu);
	} else {
		next = earliest_deadline_task(rq, cpu, idle);
		if (likely(next->prio != PRIO_LIMIT))
			clear_cpuidle_map(cpu);
		else
			set_cpuidle_map(cpu);
	}

	if (likely(prev != next)) {
		/*
		 * Don't stick tasks when a real time task is going to run as
		 * they may literally get stuck.
		 */
		if (rt_task(next))
			unstick_task(rq, prev);
		set_rq_task(rq, next);

		prev->on_cpu = false;
		next->on_cpu = true;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the grq */
		/*
		 * The context switch has flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		grq_unlock_irq();

rerun_prev_unlocked:
	preempt_enable_no_resched();
	if (unlikely(need_resched()))
		goto need_resched;
}
EXPORT_SYMBOL(schedule);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
	 * lock->owner still matches owner, if that fails, owner might
	 * point to free()d memory, if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
#endif
#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
asmlinkage void __sched notrace preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count_notrace(PREEMPT_ACTIVE);
		schedule();
		sub_preempt_count_notrace(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);

/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	struct thread_info *ti = current_thread_info();

	/* Catch callers which need to be fixed */
	BUG_ON(ti->preempt_count || !irqs_disabled());

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}

#endif /* CONFIG_PREEMPT */
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			     int nr_exclusive, int wake_flags, void *key)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, &q->task_list) {
		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
		unsigned int flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
	       int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 1, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
	__wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronised'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = WF_SYNC;

	if (unlikely(!q))
		return;

	if (unlikely(!nr_exclusive))
		wake_flags = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/**
 * __wake_up_sync - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 *
 * The sync wakeup differs that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronised'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;
	int sync = 1;

	if (unlikely(!q))
		return;

	if (unlikely(!nr_exclusive))
		sync = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, sync, NULL);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
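
/*
 * Usage sketch (illustrative, not part of the original source): the classic
 * producer/consumer pattern these wakeup primitives serve. The names
 * demo_wq and demo_ready are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
 *	static int demo_ready;
 *
 *	static int demo_consumer(void *unused)
 *	{
 *		wait_event_interruptible(demo_wq, demo_ready);
 *		return 0;
 *	}
 *
 *	static void demo_producer(void)
 *	{
 *		demo_ready = 1;
 *		wake_up(&demo_wq);	// ends up in __wake_up() above
 *	}
 */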
/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done += UINT_MAX/2;
	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue_tail_exclusive(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = schedule_timeout(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	x->done--;
	return timeout ?: 1;
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	might_sleep();

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, timeout, state);
	spin_unlock_irq(&x->wait.lock);
	return timeout;
}
/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);

/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * The return value is 0 if timed out, and positive (at least 1, or number of
 * jiffies left till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);

/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x:  holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);

/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);

/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);

/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
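
/*
 * Usage sketch (illustrative, not part of the original source): the typical
 * pattern the completion routines above serve. The names demo_done and
 * demo_thread are hypothetical.
 *
 *	static DECLARE_COMPLETION(demo_done);
 *
 *	static int demo_thread(void *unused)
 *	{
 *		// ... do the work ...
 *		complete(&demo_done);		// wakes one waiter
 *		return 0;
 *	}
 *
 *	// caller side: block until the worker signals, or 5 seconds pass
 *	if (!wait_for_completion_timeout(&demo_done, 5 * HZ))
 *		pr_warn("demo: timed out waiting for worker\n");
 */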
/**
 *	try_wait_for_completion - try to decrement a completion without blocking
 *	@x:	completion structure
 *
 *	Returns: 0 if a decrement cannot be done without blocking
 *		 1 if a decrement succeeded.
 *
 *	If a completion is being used as a counting completion,
 *	attempt to decrement the counter without blocking. This
 *	enables us to avoid waiting if the resource the completion
 *	is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	else
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);

/**
 *	completion_done - Test to see if a completion has any waiters
 *	@x:	completion structure
 *
 *	Returns: 0 if there are waiters (wait_for_completion() in progress)
 *		 1 if there are no waiters.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(completion_done);
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	__set_current_state(state);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	timeout = schedule_timeout(timeout);
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);

	return timeout;
}

void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);

long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);

void __sched sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);

long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);
#ifdef CONFIG_RT_MUTEXES

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance logic.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
	unsigned long flags;
	int queued, oldprio;
	struct rq *rq;

	BUG_ON(prio < 0 || prio > MAX_PRIO);

	rq = task_grq_lock(p, &flags);

	trace_sched_pi_setprio(p, prio);
	oldprio = p->prio;
	queued = task_queued(p);
	if (queued)
		dequeue_task(p);
	p->prio = prio;
	if (task_running(p) && prio > oldprio)
		resched_task(p);
	if (queued) {
		enqueue_task(p);
		try_preempt(p, rq);
	}

	task_grq_unlock(&flags);
}

#endif

/*
 * Adjust the deadline for when the priority is to change, before it's
 * changed.
 */
static inline void adjust_deadline(struct task_struct *p, int new_prio)
{
	p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
}
void set_user_nice(struct task_struct *p, long nice)
{
	int queued, new_static, old_static;
	unsigned long flags;
	struct rq *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	new_static = NICE_TO_PRIO(nice);
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = time_task_grq_lock(p, &flags);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * not SCHED_NORMAL/SCHED_BATCH:
	 */
	if (has_rt_policy(p)) {
		p->static_prio = new_static;
		goto out_unlock;
	}
	queued = task_queued(p);
	if (queued)
		dequeue_task(p);

	adjust_deadline(p, new_static);
	old_static = p->static_prio;
	p->static_prio = new_static;
	p->prio = effective_prio(p);

	if (queued) {
		enqueue_task(p);
		if (new_static < old_static)
			try_preempt(p, rq);
	} else if (task_running(p)) {
		reset_rq_task(rq, p);
		if (old_static < new_static)
			resched_task(p);
	}
out_unlock:
	task_grq_unlock(&flags);
}
EXPORT_SYMBOL(set_user_nice);
/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* convert nice value [19,-20] to rlimit style value [1,40] */
	int nice_rlim = 20 - nice;

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
		capable(CAP_SYS_NICE));
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice;
	int retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	if (increment < -40)
		increment = -40;
	if (increment > 40)
		increment = 40;

	nice = TASK_NICE(current) + increment;
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;

	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif
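
/*
 * Userspace sketch (illustrative, not part of the original source): the
 * syscall above backs nice(2). Lowering nice below 0 needs CAP_SYS_NICE or
 * a sufficient RLIMIT_NICE, which is exactly what can_nice() enforces.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <errno.h>
 *
 *	int main(void)
 *	{
 *		errno = 0;
 *		int prio = nice(5);		// ask to be 5 levels nicer
 *		if (prio == -1 && errno)
 *			perror("nice");
 *		else
 *			printf("new nice value: %d\n", prio);
 *		return 0;
 *	}
 */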
/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
 * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
 */
int task_prio(const struct task_struct *p)
{
	int delta, prio = p->prio - MAX_RT_PRIO;

	/* rt tasks and iso tasks */
	if (prio <= 0)
		goto out;

	/* Convert to ms to avoid overflows */
	delta = NS_TO_MS(p->deadline - grq.niffies);
	delta = delta * 40 / ms_longest_deadline_diff();
	if (delta > 0 && delta <= 80)
		prio += delta;
	if (idleprio_task(p))
		prio += 40;
out:
	return prio;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(const struct task_struct *p)
{
	return TASK_NICE(p);
}
EXPORT_SYMBOL_GPL(task_nice);
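
/*
 * Worked example (illustrative, not part of the original source): a nice-0
 * SCHED_NORMAL task sits at a base of 1. If its virtual deadline currently
 * lies half of the longest possible deadline offset into the future, delta
 * works out to 40 * 1/2 = 20, so task_prio() reports about 21. SCHED_ISO
 * tasks stay at 0, and SCHED_IDLEPRIO adds a further 40, which is how the
 * range quoted in the comment above reaches 82 for nice +19 idleprio tasks.
 */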
/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}

/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}
/* Actually do priority change: must hold grq lock. */
static void
__setscheduler(struct task_struct *p, struct rq *rq, int policy, int prio)
{
	int oldrtprio, oldprio;

	p->policy = policy;
	oldrtprio = p->rt_priority;
	p->rt_priority = prio;
	p->normal_prio = normal_prio(p);
	oldprio = p->prio;
	/* we are holding p->pi_lock already */
	p->prio = rt_mutex_getprio(p);
	if (task_running(p)) {
		reset_rq_task(rq, p);
		/* Resched only if we might now be preempted */
		if (p->prio > oldprio || p->rt_priority > oldrtprio)
			resched_task(p);
	}
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	if (cred->user->user_ns == pcred->user->user_ns)
		match = (cred->euid == pcred->euid ||
			 cred->euid == pcred->uid);
	else
		match = false;
	rcu_read_unlock();
	return match;
}
static int __sched_setscheduler(struct task_struct *p, int policy,
				const struct sched_param *param, bool user)
{
	struct sched_param zero_param = { .sched_priority = 0 };
	int queued, retval, oldpolicy = -1;
	unsigned long flags, rlim_rtprio = 0;
	int reset_on_fork;
	struct rq *rq;

	/* may grab non-irq protected spin_locks */
	BUG_ON(in_interrupt());

	if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
		unsigned long lflags;

		if (!lock_task_sighand(p, &lflags))
			return -ESRCH;
		rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
		unlock_task_sighand(p, &lflags);
		if (rlim_rtprio)
			goto recheck;
		/*
		 * If the caller requested an RT policy without having the
		 * necessary rights, we downgrade the policy to SCHED_ISO.
		 * We also set the parameter to zero to pass the checks.
		 */
		policy = SCHED_ISO;
		param = &zero_param;
	}
recheck:
	/* double check policy once rq lock held */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
		policy &= ~SCHED_RESET_ON_FORK;

		if (!SCHED_RANGE(policy))
			return -EINVAL;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
	 * SCHED_BATCH is 0.
	 */
	if (param->sched_priority < 0 ||
	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
	    (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
		return -EINVAL;
	if (is_rt_policy(policy) != (param->sched_priority != 0))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (is_rt_policy(policy)) {
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);

			/* can't set/change the rt policy */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* can't increase priority */
			if (param->sched_priority > p->rt_priority &&
			    param->sched_priority > rlim_rtprio)
				return -EPERM;
		} else {
			switch (p->policy) {
			/*
			 * Can only downgrade policies but not back to
			 * SCHED_NORMAL
			 */
			case SCHED_ISO:
				if (policy == SCHED_ISO)
					goto out;
				if (policy == SCHED_NORMAL)
					return -EPERM;
				break;
			case SCHED_BATCH:
				if (policy == SCHED_BATCH)
					goto out;
				if (policy != SCHED_IDLEPRIO)
					return -EPERM;
				break;
			case SCHED_IDLEPRIO:
				if (policy == SCHED_IDLEPRIO)
					goto out;
				return -EPERM;
			default:
				break;
			}
		}

		/* can't change other user's priorities */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;
	}

	if (user) {
		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/*
	 * make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/*
	 * To be able to change p->policy safely, the grunqueue lock must be
	 * held.
	 */
	rq = __task_grq_lock(p);

	/*
	 * Changing the policy of the stop threads is a very bad idea
	 */
	if (p == rq->stop) {
		__task_grq_unlock();
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		return -EINVAL;
	}

	/*
	 * If not changing anything there's no need to proceed further:
	 */
	if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
			param->sched_priority == p->rt_priority))) {
		__task_grq_unlock();
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		return 0;
	}

	/* recheck policy now with rq lock held */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		__task_grq_unlock();
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		goto recheck;
	}

	p->sched_reset_on_fork = reset_on_fork;

	queued = task_queued(p);
	if (queued)
		dequeue_task(p);
	__setscheduler(p, rq, policy, param->sched_priority);
	if (queued) {
		enqueue_task(p);
		try_preempt(p, rq);
	}
	__task_grq_unlock();
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	rt_mutex_adjust_pi(p);
out:
	return 0;
}

/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return __sched_setscheduler(p, policy, param, true);
}

EXPORT_SYMBOL_GPL(sched_setscheduler);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return __sched_setscheduler(p, policy, param, false);
}
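
/*
 * Userspace sketch (illustrative, not part of the original source): asking
 * for SCHED_FIFO priority 10 through the syscall path that lands in
 * __sched_setscheduler() above. Note that under this scheduler an
 * unprivileged caller with no RLIMIT_RTPRIO is silently downgraded to
 * SCHED_ISO rather than refused with -EPERM.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp = { .sched_priority = 10 };
 *
 *		if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
 *			perror("sched_setscheduler");
 *		else
 *			printf("policy now %d\n", sched_getscheduler(0));
 *		return 0;
 *	}
 */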
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 */
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
				       struct sched_param __user *param)
{
	/* negative values for policy are not valid */
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, -1, param);
}
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval = -EINVAL;

	if (pid < 0)
		return retval;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy;
	}
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval = -EINVAL;

	if (!param || pid < 0)
		return retval;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);
again:
	retval = set_cpus_allowed_ptr(p, new_mask);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     cpumask_t *new_mask)
{
	if (len < sizeof(cpumask_t)) {
		memset(new_mask, 0, sizeof(cpumask_t));
	} else if (len > sizeof(cpumask_t)) {
		len = sizeof(cpumask_t);
	}
	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}
/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, cpumask_t *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	grq_lock_irqsave(&flags);
	cpumask_and(mask, tsk_cpus_allowed(p), cpu_online_mask);
	grq_unlock_irqrestore(&flags);

out_unlock:
	rcu_read_unlock();
	put_online_cpus();

	return retval;
}

/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
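
/*
 * Userspace sketch (illustrative, not part of the original source): pinning
 * the calling process to CPU 0 via the syscalls defined above.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *			perror("sched_setaffinity");
 *
 *		CPU_ZERO(&set);
 *		if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *			printf("pinned to cpu0: %d\n", CPU_ISSET(0, &set));
 *		return 0;
 *	}
 */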
/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. It does this by
 * scheduling away the current task. If it still has the earliest deadline
 * it will be scheduled again as the next task.
 */
SYSCALL_DEFINE0(sched_yield)
{
	struct task_struct *p;

	p = current;
	grq_lock_irq();
	schedstat_inc(task_rq(p), yld_count);

	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 */
	__release(grq.lock);
	spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
	do_raw_spin_unlock(&grq.lock);
	preempt_enable_no_resched();

	schedule();

	return 0;
}

static inline bool should_resched(void)
{
	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
}

static void __cond_resched(void)
{
	/* NOT a real fix but will make voluntary preempt work. (A silly thing.) */
	if (unlikely(system_state != SYSTEM_RUNNING))
		return;

	add_preempt_count(PREEMPT_ACTIVE);
	schedule();
	sub_preempt_count(PREEMPT_ACTIVE);
}

int __sched _cond_resched(void)
{
	if (should_resched()) {
		__cond_resched();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_cond_resched);

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched();
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			__cond_resched();
		else
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);

int __sched __cond_resched_softirq(void)
{
	BUG_ON(!in_softirq());

	if (should_resched()) {
		local_bh_enable();
		__cond_resched();
		local_bh_disable();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
/**
 * yield - yield the current processor to other threads.
 *
 * This is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Returns true if we indeed boosted the target task.
 */
bool __sched yield_to(struct task_struct *p, bool preempt)
{
	unsigned long flags;
	bool yielded = false;
	struct rq *rq;

	rq = this_rq();
	grq_lock_irqsave(&flags);
	if (task_running(p) || p->state)
		goto out_unlock;
	yielded = true;
	if (p->deadline > rq->rq_deadline)
		p->deadline = rq->rq_deadline;
	p->time_slice += rq->rq_time_slice;
	rq->rq_time_slice = 0;
	if (p->time_slice > timeslice())
		p->time_slice = timeslice();
	set_tsk_need_resched(rq->curr);
out_unlock:
	grq_unlock_irqrestore(&flags);

	if (yielded)
		schedule();
	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 *
 * But don't do that if it is a deliberate, throttling IO wait (this task
 * has set its backing_dev_info: the queue against which it should throttle)
 */
void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);
	current->in_iowait = 1;
	schedule();
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);
long __sched io_schedule_timeout(long timeout)
{
	struct rq *rq = raw_rq();
	long ret;

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);
	current->in_iowait = 1;
	ret = schedule_timeout(timeout);
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();

	return ret;
}
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_USER_RT_PRIO-1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_ISO:
	case SCHED_IDLEPRIO:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_ISO:
	case SCHED_IDLEPRIO:
		ret = 0;
		break;
	}
	return ret;
}
/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct timespec __user *, interval)
{
	struct task_struct *p;
	unsigned int time_slice;
	unsigned long flags;
	int retval;
	struct timespec t;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	grq_lock_irqsave(&flags);
	time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
	grq_unlock_irqrestore(&flags);

	rcu_read_unlock();
	t = ns_to_timespec(time_slice);
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}
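/*
 * Note that the value reported above is the task's full timeslice as
 * computed by task_timeslice() (rr_interval scaled by the task's priority
 * ratio), not a per-policy round-robin quantum; SCHED_FIFO tasks report 0,
 * i.e. "infinite".
 */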
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	unsigned state;

	state = p->state ? __ffs(p->state) + 1 : 0;
	printk(KERN_INFO "%-15.15s %c", p->comm,
		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
	if (state == TASK_RUNNING)
		printk(KERN_CONT " running  ");
	else
		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
	if (state == TASK_RUNNING)
		printk(KERN_CONT "  running task    ");
	else
		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
		task_pid_nr(p), task_pid_nr(p->real_parent),
		(unsigned long)task_thread_info(p)->flags);

	show_stack(p, NULL);
}
void show_state_filter(unsigned long state_filter)
{
	struct task_struct *g, *p;

#if BITS_PER_LONG == 32
	printk(KERN_INFO
		"  task                PC stack   pid father\n");
#else
	printk(KERN_INFO
		"  task                        PC stack   pid father\n");
#endif
	rcu_read_lock();
	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		if (!state_filter || (p->state & state_filter))
			sched_show_task(p);
	} while_each_thread(g, p);

	touch_all_softlockup_watchdogs();
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(tsk_cpus_allowed(p), new_mask);
}
/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	time_grq_lock(rq, &flags);
	idle->last_ran = rq->clock;
	idle->state = TASK_RUNNING;
	/* Setting prio to illegal value shouldn't matter when never queued */
	idle->prio = PRIO_LIMIT;
	set_rq_task(rq, idle);
	do_set_cpus_allowed(idle, &cpumask_of_cpu(cpu));
	/* Silence PROVE_RCU */
	rcu_read_lock();
	set_task_cpu(idle, cpu);
	rcu_read_unlock();
	rq->curr = rq->idle = idle;
	grq_unlock_irqrestore(&flags);

	/* Set the preempt count _outside_ the spinlocks! */
	task_thread_info(idle)->preempt_count = 0;

	ftrace_graph_init_idle_task(idle, cpu);
#if defined(CONFIG_SMP)
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}
#ifdef CONFIG_NO_HZ
void select_nohz_load_balancer(int stop_tick)
{
}

void set_cpu_sd_state_idle(void) {}

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
 * lowest_flag_domain - Return lowest sched_domain containing flag.
 * @cpu:	The cpu whose lowest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the lowest sched_domain
 *		for the given cpu.
 *
 * Returns the lowest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd)
		if (sd && (sd->flags & flag))
			break;

	return sd;
}

/**
 * for_each_flag_domain - Iterates over sched_domains containing the flag.
 * @cpu:	The cpu whose domains we're iterating over.
 * @sd:		variable holding the value of the power_savings_sd
 *		for cpu.
 * @flag:	The flag to filter the sched_domains to be iterated.
 *
 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
 * set, starting from the lowest sched_domain to the highest.
 */
#define for_each_flag_domain(cpu, sd, flag) \
	for (sd = lowest_flag_domain(cpu, flag); \
		(sd && (sd->flags & flag)); sd = sd->parent)

#endif /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
static inline void resched_cpu(int cpu)
{
	unsigned long flags;

	grq_lock_irqsave(&flags);
	resched_task(cpu_curr(cpu));
	grq_unlock_irqrestore(&flags);
}

/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
	struct task_struct *idle;
	struct rq *rq;

	if (cpu == smp_processor_id())
		return;

	rq = cpu_rq(cpu);
	idle = rq->idle;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialised on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (unlikely(rq->curr != idle))
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(idle))
		smp_send_reschedule(cpu);
}

#endif /* CONFIG_NO_HZ */
/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	bool running_wrong = false;
	bool queued = false;
	unsigned long flags;
	struct rq *rq;
	int ret = 0;

	rq = task_grq_lock(p, &flags);

	if (cpumask_equal(tsk_cpus_allowed(p), new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
		ret = -EINVAL;
		goto out;
	}

	queued = task_queued(p);

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	if (task_running(p)) {
		/* Task is running on the wrong cpu now, reschedule it. */
		if (rq == this_rq()) {
			set_tsk_need_resched(p);
			running_wrong = true;
		} else
			resched_task(p);
	} else
		set_task_cpu(p, cpumask_any_and(cpu_active_mask, new_mask));

out:
	if (queued)
		try_preempt(p, rq);
	task_grq_unlock(&flags);

	if (running_wrong)
		_cond_resched();

	return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
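/*
 * Because all CPUs share the single global runqueue, changing affinity
 * never migrates a task between per-CPU queues; it is enough to update the
 * allowed mask, pick any still-allowed active CPU with set_task_cpu() if
 * the current one was removed, and force a reschedule when the task is
 * presently running on a now-disallowed CPU.
 */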
#ifdef CONFIG_HOTPLUG_CPU
/* Run through task list and find tasks affined to just the dead cpu, then
 * allocate a new affinity */
static void break_sole_affinity(int src_cpu, struct task_struct *idle)
{
	struct task_struct *p, *t;

	do_each_thread(t, p) {
		if (p != idle && !online_cpus(p)) {
			cpumask_copy(tsk_cpus_allowed(p), cpu_possible_mask);
			/*
			 * Don't tell them about moving exiting tasks or
			 * kernel threads (both mm NULL), since they never
			 * leave kernel.
			 */
			if (p->mm && printk_ratelimit()) {
				printk(KERN_INFO "process %d (%s) no "
					"longer affine to cpu %d\n",
					task_pid_nr(p), p->comm, src_cpu);
			}
		}
	} while_each_thread(t, p);
}
/*
 * Schedules idle task to be the next runnable task on current CPU.
 * It does so by boosting its priority to highest possible.
 * Used by CPU offline code.
 */
void sched_idle_next(struct rq *rq, int this_cpu, struct task_struct *idle)
{
	/* cpu has to be offline */
	BUG_ON(cpu_online(this_cpu));

	__setscheduler(idle, rq, SCHED_FIFO, STOP_PRIO);

	activate_idle_task(idle);
	set_tsk_need_resched(rq->curr);
}

/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));

	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
	mmdrop(mm);
}
#endif /* CONFIG_HOTPLUG_CPU */
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
	struct sched_param start_param = { .sched_priority = MAX_USER_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, its something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal rt scheduling prio so that
		 * it can die in pieces.
		 */
		sched_setscheduler_nocheck(old_stop, SCHED_FIFO, &start_param);
	}
}
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		mode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(13);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[11], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[12] is terminator */

	return table;
}
static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
	if (sd_sysctl_header)
		unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif
static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		cpumask_set_cpu(cpu_of(rq), rq->rd->online);
		rq->online = true;
	}
}

static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		cpumask_clear_cpu(cpu_of(rq), rq->rd->online);
		rq->online = false;
	}
}
/*
 * migration_call - callback that gets triggered when a CPU is added.
 */
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	unsigned long flags;
	struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_HOTPLUG_CPU
	struct task_struct *idle = rq->idle;
#endif

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
		break;

	case CPU_ONLINE:
		/* Update our root-domain */
		grq_lock_irqsave(&flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

			set_rq_online(rq);
		}
		grq.noc = num_online_cpus();
		grq_unlock_irqrestore(&flags);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		/* Idle task back to normal (off runqueue, low prio) */
		grq_lock_irq();
		return_task(idle, true);
		idle->static_prio = MAX_PRIO;
		__setscheduler(idle, rq, SCHED_NORMAL, 0);
		idle->prio = PRIO_LIMIT;
		set_rq_task(rq, idle);
		grq_unlock_irq();
		break;

	case CPU_DYING:
		/* Update our root-domain */
		grq_lock_irqsave(&flags);
		sched_idle_next(rq, cpu, idle);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			set_rq_offline(rq);
		}
		break_sole_affinity(cpu, idle);
		grq.noc = num_online_cpus();
		grq_unlock_irqrestore(&flags);
		break;
#endif
	}
	return NOTIFY_OK;
}
/*
 * Register at high priority so that task migration (migrate_all_tasks)
 * happens before everything else. This has to be lower priority than
 * the notifier in the perf_counter subsystem, though.
 */
static struct notifier_block __cpuinitdata migration_notifier = {
	.notifier_call = migration_call,
	.priority = CPU_PRI_MIGRATION,
};

static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		set_cpu_active((long)hcpu, true);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		set_cpu_active((long)hcpu, false);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

int __init migration_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	/* Initialise migration for the boot CPU */
	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
	BUG_ON(err == NOTIFY_BAD);
	migration_call(&migration_notifier, CPU_ONLINE, cpu);
	register_cpu_notifier(&migration_notifier);

	/* Register cpu active notifiers */
	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);

	return 0;
}
early_initcall(migration_init);
static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */

#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_domain_debug_enabled;

static int __init sched_domain_debug_setup(char *str)
{
	sched_domain_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_domain_debug_setup);
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	char str[256];

	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
					" has parent");
		return -1;
	}

	printk(KERN_CONT "span %s level %s\n", str, sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain "
				"CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain"
				" CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!group->sgp->power) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: domain->cpu_power not "
					"set\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));

		printk(KERN_CONT " %s", str);
		if (group->sgp->power != SCHED_POWER_SCALE) {
			printk(KERN_CONT " (cpu_power = %d)",
				group->sgp->power);
		}

		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset "
			"of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_domain_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
#endif /* CONFIG_SCHED_DEBUG */
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUPOWER |
			 SD_SHARE_PKG_RESOURCES)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
			    SD_BALANCE_NEWIDLE |
			    SD_BALANCE_FORK |
			    SD_BALANCE_EXEC |
			    SD_SHARE_CPUPOWER |
			    SD_SHARE_PKG_RESOURCES);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}
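/*
 * Example of the degeneration rules above: on a single-socket, non-NUMA
 * machine the NODE level spans exactly the same CPUs as the CPU (package)
 * level and has only one group, so sd_parent_degenerate() lets
 * cpu_attach_domain() splice it out of the hierarchy and destroy it.
 */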
static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	grq_lock_irqsave(&flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we dont want to free the old_rt yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	grq_unlock_irqrestore(&flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_online;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_rto_mask;
	return 0;

free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}
static void free_sched_groups(struct sched_group *sg, int free_sgp)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
			kfree(sg->sgp);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void free_sched_domain(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/*
	 * If its an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgp);
		kfree(sd->groups);
	}
	kfree(sd);
}

static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
	call_rcu(&sd->rcu, free_sched_domain);
}

static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
	for (; sd; sd = sd->parent)
		destroy_sched_domain(sd, cpu);
}
/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			destroy_sched_domain(parent, cpu);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp, cpu);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp, cpu);
}

/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}

__setup("isolcpus=", isolated_cpu_setup);
#define SD_NODES_PER_DOMAIN 16

#ifdef CONFIG_NUMA

/**
 * find_next_best_node - find the next node to include in a sched_domain
 * @node: node whose sched_domain we're building
 * @used_nodes: nodes already in the sched_domain
 *
 * Find the next node to include in a given scheduling domain. Simply
 * finds the closest node not already in the @used_nodes map.
 *
 * Should use nodemask_t.
 */
static int find_next_best_node(int node, nodemask_t *used_nodes)
{
	int i, n, val, min_val, best_node = -1;

	min_val = INT_MAX;

	for (i = 0; i < nr_node_ids; i++) {
		/* Start at @node */
		n = (node + i) % nr_node_ids;

		if (!nr_cpus_node(n))
			continue;

		/* Skip already used nodes */
		if (node_isset(n, *used_nodes))
			continue;

		/* Simple min distance search */
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node != -1)
		node_set(best_node, *used_nodes);
	return best_node;
}

/**
 * sched_domain_node_span - get a cpumask for a node's sched_domain
 * @node: node whose cpumask we're constructing
 * @span: resulting cpumask
 *
 * Given a node, construct a good cpumask for its sched_domain to span. It
 * should be one that prevents unnecessary balancing, but also spreads tasks
 * out optimally.
 */
static void sched_domain_node_span(int node, struct cpumask *span)
{
	nodemask_t used_nodes;
	int i;

	cpumask_clear(span);
	nodes_clear(used_nodes);

	cpumask_or(span, span, cpumask_of_node(node));
	node_set(node, used_nodes);

	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
		int next_node = find_next_best_node(node, &used_nodes);
		if (next_node < 0)
			break;
		cpumask_or(span, span, cpumask_of_node(next_node));
	}
}

static const struct cpumask *cpu_node_mask(int cpu)
{
	lockdep_assert_held(&sched_domains_mutex);

	sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);

	return sched_domains_tmpmask;
}

static const struct cpumask *cpu_allnodes_mask(int cpu)
{
	return cpu_possible_mask;
}
#endif /* CONFIG_NUMA */

static const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}
int sched_smt_power_savings = 0, sched_mc_power_savings = 0;

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_power **__percpu sgp;
};

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

struct sched_domain_topology_level;

typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);

#define SDTL_OVERLAP	0x01

struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	int		    flags;
	struct sd_data      data;
};
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *child;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				  GFP_KERNEL, cpu_to_node(i));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);

		child = *per_cpu_ptr(sdd->sd, i);
		if (child->child) {
			child = child->child;
			cpumask_copy(sg_span, sched_domain_span(child));
		} else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
		atomic_inc(&sg->sgp->ref);

		if (cpumask_test_cpu(cpu, sg_span))
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
	}

	return cpu;
}
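/*
 * get_group() returns the group "identity" for @cpu at this level: the
 * first CPU of the child domain's span. Every CPU whose child span starts
 * at that same CPU therefore lands in the same sched_group, which is how
 * build_sched_groups() below partitions the parent span.
 */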
/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_power to 0.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	if (cpu != cpumask_first(sched_domain_span(sd)))
		return 0;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group = get_group(i, sdd, &sg);
		int j;

		if (cpumask_test_cpu(i, covered))
			continue;

		cpumask_clear(sched_group_cpus(sg));
		sg->sgp->power = 0;

		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}
/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type)		sd->name = #type
#else
# define SD_INIT_NAME(sd, type)		do { } while (0)
#endif

#define SD_INIT_FUNC(type)						\
static noinline struct sched_domain *					\
sd_init_##type(struct sched_domain_topology_level *tl, int cpu)	\
{									\
	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
	*sd = SD_##type##_INIT;						\
	SD_INIT_NAME(sd, type);						\
	sd->private = &tl->data;					\
	return sd;							\
}

SD_INIT_FUNC(CPU)
#ifdef CONFIG_NUMA
 SD_INIT_FUNC(ALLNODES)
 SD_INIT_FUNC(NODE)
#endif
#ifdef CONFIG_SCHED_SMT
 SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
 SD_INIT_FUNC(MC)
#endif
#ifdef CONFIG_SCHED_BOOK
 SD_INIT_FUNC(BOOK)
#endif

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	unsigned long val;

	val = simple_strtoul(str, NULL, 0);
	if (val < sched_domain_level_max)
		default_relax_domain_level = val;

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* turn off idle balance on this domain */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* turn on idle balance on this domain */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu); /* fall through */
	case sa_sd:
		free_percpu(d->sd); /* fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map); /* fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
						   const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
static const struct cpumask *cpu_smt_mask(int cpu)
{
	return topology_thread_cpumask(cpu);
}
#endif

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ sd_init_SIBLING, cpu_smt_mask, },
#endif
#ifdef CONFIG_SCHED_MC
	{ sd_init_MC, cpu_coregroup_mask, },
#endif
#ifdef CONFIG_SCHED_BOOK
	{ sd_init_BOOK, cpu_book_mask, },
#endif
	{ sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
	{ sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology = default_topology;
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for (tl = sched_domain_topology; tl->init; tl++) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgp = alloc_percpu(struct sched_group_power *);
		if (!sdd->sgp)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_group *sg;
			struct sched_group_power *sgp;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgp = kzalloc_node(sizeof(struct sched_group_power),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgp)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sgp, j) = sgp;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for (tl = sched_domain_topology; tl->init; tl++) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
			if (sd && (sd->flags & SD_OVERLAP))
				free_sched_groups(sd->groups, 0);
			kfree(*per_cpu_ptr(sdd->sd, j));
			kfree(*per_cpu_ptr(sdd->sg, j));
			kfree(*per_cpu_ptr(sdd->sgp, j));
		}
		free_percpu(sdd->sd);
		free_percpu(sdd->sg);
		free_percpu(sdd->sgp);
	}
}
struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		struct s_data *d, const struct cpumask *cpu_map,
		struct sched_domain_attr *attr, struct sched_domain *child,
		int cpu)
{
	struct sched_domain *sd = tl->init(tl, cpu);
	if (!sd)
		return child;

	set_domain_attribute(sd, attr);
	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;
	}
	sd->child = child;

	return sd;
}
/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
static int build_sched_domains(const struct cpumask *cpu_map,
			       struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state = sa_none;
	struct sched_domain *sd;
	struct s_data d;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for cpus specified by the cpu_map. */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for (tl = sched_domain_topology; tl->init; tl++) {
			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
			if (tl->flags & SDTL_OVERLAP)
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}

		while (sd->child)
			sd = sd->child;

		*per_cpu_ptr(d.sd, i) = sd;
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU power for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		sd = *per_cpu_ptr(d.sd, i);
		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);
	return ret;
}
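/*
 * The overall flow above: __visit_domain_allocation_hell() allocates the
 * percpu sd/sg/sgp storage and a root_domain, build_sched_domain()
 * instantiates one level per topology entry bottom-up, the groups are then
 * linked per level, claim_allocations() keeps the structures that ended up
 * in use, and cpu_attach_domain() publishes the result (with degenerate
 * levels removed) before the leftover allocations are freed again.
 */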
static cpumask_var_t *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attributes of custom domains in 'doms_cur' */

/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
	return 0;
}

cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;
	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}
/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	dattr_cur = NULL;
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of cpus specified in cpu_map
 * These cpus will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* fast path */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fallback to the single partition
 * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* always unregister in case we don't destroy any domains */
	unregister_sched_domain_sysctl();

	/* Let architecture update cpu core mappings. */
	new_topology = arch_update_cpu_topology();

	n = doms_new ? ndoms_new : 0;

	/* Destroy deleted domains */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* no match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	if (doms_new == NULL) {
		ndoms_cur = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < ndoms_cur && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* no match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);
	kfree(dattr_cur);	/* kfree(NULL) is safe */
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}
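/*
 * Example: when cpusets splits the machine into two non-overlapping sets,
 * it calls the function above with ndoms_new == 2; any existing partition
 * that matches one of the new masks (and attributes) is kept as-is, while
 * the rest are detached and rebuilt, so unrelated domains are not torn
 * down needlessly.
 */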
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
static void reinit_sched_domains(void)
{
	get_online_cpus();

	/* Destroy domains first to force the rebuild */
	partition_sched_domains(0, NULL, NULL);

	rebuild_sched_domains();
	put_online_cpus();
}

static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
{
	unsigned int level = 0;

	if (sscanf(buf, "%u", &level) != 1)
		return -EINVAL;

	/*
	 * level is always positive so don't check for
	 * level < POWERSAVINGS_BALANCE_NONE which is 0
	 * What happens on 0 or 1 byte write,
	 * need to check for count as well?
	 */

	if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
		return -EINVAL;

	if (smt)
		sched_smt_power_savings = level;
	else
		sched_mc_power_savings = level;

	reinit_sched_domains();

	return count;
}
#ifdef CONFIG_SCHED_MC
static ssize_t sched_mc_power_savings_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return sprintf(buf, "%u\n", sched_mc_power_savings);
}
static ssize_t sched_mc_power_savings_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	return sched_power_savings_store(buf, count, 0);
}
static DEVICE_ATTR(sched_mc_power_savings, 0644,
		   sched_mc_power_savings_show,
		   sched_mc_power_savings_store);
#endif

#ifdef CONFIG_SCHED_SMT
static ssize_t sched_smt_power_savings_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return sprintf(buf, "%u\n", sched_smt_power_savings);
}
static ssize_t sched_smt_power_savings_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	return sched_power_savings_store(buf, count, 1);
}
static DEVICE_ATTR(sched_smt_power_savings, 0644,
		   sched_smt_power_savings_show,
		   sched_smt_power_savings_store);
#endif

int __init sched_create_sysfs_power_savings_entries(struct device *dev)
{
	int err = 0;

#ifdef CONFIG_SCHED_SMT
	if (smt_capable())
		err = device_create_file(dev, &dev_attr_sched_smt_power_savings);
#endif
#ifdef CONFIG_SCHED_MC
	if (!err && mc_capable())
		err = device_create_file(dev, &dev_attr_sched_mc_power_savings);
#endif
	return err;
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpuset_update_active_cpus();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
			       void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		cpuset_update_active_cpus();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
/*
 * Cheaper version of the below functions in case support for SMT and MC is
 * compiled in but CPUs have no siblings.
 */
static bool sole_cpu_idle(int cpu)
{
	return rq_idle(cpu_rq(cpu));
}
#endif
#ifdef CONFIG_SCHED_SMT
/* All this CPU's SMT siblings are idle */
static bool siblings_cpu_idle(int cpu)
{
	return cpumask_subset(&(cpu_rq(cpu)->smt_siblings),
			      &grq.cpu_idle_map);
}
#endif
#ifdef CONFIG_SCHED_MC
/* All this CPU's shared cache siblings are idle */
static bool cache_cpu_idle(int cpu)
{
	return cpumask_subset(&(cpu_rq(cpu)->cache_siblings),
			      &grq.cpu_idle_map);
}
#endif
enum sched_domain_level {
	SD_LV_NONE = 0,
	SD_LV_SIBLING,
	SD_LV_MC,
	SD_LV_BOOK,
	SD_LV_CPU,
	SD_LV_NODE,
	SD_LV_ALLNODES,
	SD_LV_MAX
};

void __init sched_init_smp(void)
{
	struct sched_domain *sd;
	int cpu;

	cpumask_var_t non_isolated_cpus;

	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	get_online_cpus();
	mutex_lock(&sched_domains_mutex);
	init_sched_domains(cpu_active_mask);
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
	if (cpumask_empty(non_isolated_cpus))
		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
	mutex_unlock(&sched_domains_mutex);
	put_online_cpus();

	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
		BUG();
	free_cpumask_var(non_isolated_cpus);

	grq_lock_irq();
	/*
	 * Set up the relative cache distance of each online cpu from each
	 * other in a simple array for quick lookup. Locality is determined
	 * by the closest sched_domain that CPUs are separated by. CPUs with
	 * shared cache in SMT and MC are treated as local. Separate CPUs
	 * (within the same package or physically) within the same node are
	 * treated as not local. CPUs not even in the same domain (different
	 * nodes) are treated as very distant.
	 */
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		for_each_domain(cpu, sd) {
			int locality, other_cpu;

#ifdef CONFIG_SCHED_SMT
			if (sd->level == SD_LV_SIBLING) {
				for_each_cpu_mask(other_cpu, *sched_domain_span(sd))
					cpumask_set_cpu(other_cpu, &rq->smt_siblings);
			}
#endif
#ifdef CONFIG_SCHED_MC
			if (sd->level == SD_LV_MC) {
				for_each_cpu_mask(other_cpu, *sched_domain_span(sd))
					cpumask_set_cpu(other_cpu, &rq->cache_siblings);
			}
#endif
			if (sd->level <= SD_LV_SIBLING)
				locality = 1;
			else if (sd->level <= SD_LV_MC)
				locality = 2;
			else if (sd->level <= SD_LV_NODE)
				locality = 3;
			else
				locality = 4;

			for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) {
				if (locality < rq->cpu_locality[other_cpu])
					rq->cpu_locality[other_cpu] = locality;
			}
		}

		/*
		 * Each runqueue has its own function in case it doesn't have
		 * siblings of its own allowing mixed topologies.
		 */
#ifdef CONFIG_SCHED_SMT
		if (cpus_weight(rq->smt_siblings) > 1)
			rq->siblings_idle = siblings_cpu_idle;
#endif
#ifdef CONFIG_SCHED_MC
		if (cpus_weight(rq->cache_siblings) > 1)
			rq->cache_idle = cache_cpu_idle;
#endif
	}
	grq_unlock_irq();
}
#else
void __init sched_init_smp(void)
{
}
#endif /* CONFIG_SMP */
unsigned int sysctl_timer_migration = 1;

int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}
void __init sched_init(void)
{
	int i;
	struct rq *rq;

	prio_ratios[0] = 128;
	for (i = 1 ; i < PRIO_RANGE ; i++)
		prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;

	raw_spin_lock_init(&grq.lock);
	grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0;
	grq.last_jiffy = jiffies;
	raw_spin_lock_init(&grq.iso_lock);
	grq.iso_refractory = false;
#ifdef CONFIG_SMP
	init_defrootdomain();
	grq.qnr = grq.idle_cpus = 0;
	cpumask_clear(&grq.cpu_idle_map);
#else
	uprq = &per_cpu(runqueues, 0);
#endif
	for_each_possible_cpu(i) {
		rq = cpu_rq(i);
		rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc =
			      rq->iowait_pc = rq->idle_pc = 0;
#ifdef CONFIG_SMP
		rq->sticky_task = NULL;
		rq_attach_root(rq, &def_root_domain);
#endif
		atomic_set(&rq->nr_iowait, 0);
	}

#ifdef CONFIG_SMP
	/*
	 * Set the base locality for cpu cache distance calculation to
	 * "distant" (3). Make sure the distance from a CPU to itself is 0.
	 */
	for_each_possible_cpu(i) {
		int j;

		rq = cpu_rq(i);
#ifdef CONFIG_SCHED_SMT
		cpumask_clear(&rq->smt_siblings);
		cpumask_set_cpu(i, &rq->smt_siblings);
		rq->siblings_idle = sole_cpu_idle;
		cpumask_set_cpu(i, &rq->smt_siblings);
#endif
#ifdef CONFIG_SCHED_MC
		cpumask_clear(&rq->cache_siblings);
		cpumask_set_cpu(i, &rq->cache_siblings);
		rq->cache_idle = sole_cpu_idle;
		cpumask_set_cpu(i, &rq->cache_siblings);
#endif
		rq->cpu_locality = kmalloc(nr_cpu_ids * sizeof(int *), GFP_ATOMIC);
		for_each_possible_cpu(j) {
			if (i == j)
				rq->cpu_locality[j] = 0;
			else
				rq->cpu_locality[j] = 4;
		}
	}
#endif /* CONFIG_SMP */

	for (i = 0; i < PRIO_LIMIT; i++)
		INIT_LIST_HEAD(grq.queue + i);
	/* delimiter for bitsearch */
	__set_bit(PRIO_LIMIT, grq.prio_bitmap);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif

#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&init_task.pi_waiters);
#endif

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

#ifdef CONFIG_SMP
	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
	/* May be allocated at isolcpus cmdline parse time */
	if (cpu_isolated_map == NULL)
		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* CONFIG_SMP */
}
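/*
 * After the per-cpu loop above every CPU starts out considering all other
 * CPUs distant and itself local (0); sched_init_smp() later tightens these
 * values from the sched_domain levels (SMT siblings closest, then shared
 * cache, then same node), which is what the task placement code uses as
 * its cache-distance hint.
 */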
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();

	return (nested == preempt_offset);
}

void __might_sleep(const char *file, int line, int preempt_offset)
{
	static unsigned long prev_jiffy;	/* ratelimiting */

	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
	    system_state != SYSTEM_RUNNING || oops_in_progress)
		return;
	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR
		"BUG: sleeping function called from invalid context at %s:%d\n",
			file, line);
	printk(KERN_ERR
		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(),
			current->pid, current->comm);

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);
	dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	unsigned long flags;
	struct rq *rq;
	int queued;

	read_lock_irq(&tasklist_lock);

	do_each_thread(g, p) {
		if (!rt_task(p) && !iso_task(p))
			continue;

		raw_spin_lock_irqsave(&p->pi_lock, flags);
		rq = __task_grq_lock(p);

		queued = task_queued(p);
		if (queued)
			dequeue_task(p);
		__setscheduler(p, rq, SCHED_NORMAL, 0);
		if (queued) {
			enqueue_task(p);
			try_preempt(p, rq);
		}

		__task_grq_unlock();
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	} while_each_thread(g, p);

	read_unlock_irq(&tasklist_lock);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPU's synchronised, and interrupts disabled, and
 * the caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}
#endif
/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, utime = p->utime, total = utime + p->stime;

	rtime = nsecs_to_cputime(p->sched_time);

	if (total) {
		u64 temp;

		temp = (u64)(rtime * utime);
		do_div(temp, total);
		utime = (cputime_t)temp;
	} else
		utime = rtime;

	/*
	 * Compare with previous values, to keep monotonicity:
	 */
	p->prev_utime = max(p->prev_utime, utime);
	p->prev_stime = max(p->prev_stime, (rtime - p->prev_utime));

	*ut = p->prev_utime;
	*st = p->prev_stime;
}

/*
 * Must be called with siglock held.
 */
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct signal_struct *sig = p->signal;
	struct task_cputime cputime;
	cputime_t rtime, utime, total;

	thread_group_cputime(p, &cputime);

	total = cputime.utime + cputime.stime;
	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);

	if (total) {
		u64 temp;

		temp = (u64)(rtime * cputime.utime);
		do_div(temp, total);
		utime = (cputime_t)temp;
	} else
		utime = rtime;

	sig->prev_utime = max(sig->prev_utime, utime);
	sig->prev_stime = max(sig->prev_stime, (rtime - sig->prev_utime));

	*ut = sig->prev_utime;
	*st = sig->prev_stime;
}
#endif
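/*
 * Both fallback paths above apportion the scheduler-measured runtime
 * (rtime, from sched_time / sum_exec_runtime) between user and system time
 * in the same ratio the tick-based samples suggest, i.e.
 * utime_scaled = rtime * utime / total, then clamp against the previously
 * reported values so the numbers never go backwards.
 */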
inline cputime_t task_gtime(struct task_struct *p)
{
	return p->gtime;
}

void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{}

#ifdef CONFIG_SCHED_DEBUG
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{}

void proc_sched_set_task(struct task_struct *p)
{}
#endif

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return SCHED_LOAD_SCALE;
}

unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
	unsigned long weight = cpumask_weight(sched_domain_span(sd));
	unsigned long smt_gain = sd->smt_gain;