/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>

#include <trace/events/sched.h>

#include <linux/zentune.h>
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
#if defined(CONFIG_ZEN_DEFAULT)
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
#elif defined(CONFIG_ZEN_CUSTOM)
unsigned int sysctl_sched_latency = sysctl_sched_latency_custom;
unsigned int normalized_sysctl_sched_latency = normalized_sysctl_sched_latency_custom;
#endif
/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 *
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;
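/*
 * Editor's illustrative sketch (not part of the original file): how the three
 * scaling modes above translate into the multiplier that is later applied to
 * the normalized tunables by get_update_sysctl_factor()/update_sysctl().
 * Plain userspace C assuming an 8-CPU machine; demo_ilog2() is a stand-in
 * for the kernel's ilog2().
 */
#include <stdio.h>

static unsigned int demo_ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int cpus = 8;	/* num_online_cpus() is capped at 8 here */

	unsigned int none = 1;				/* SCHED_TUNABLESCALING_NONE   */
	unsigned int log = 1 + demo_ilog2(cpus);	/* SCHED_TUNABLESCALING_LOG    */
	unsigned int linear = cpus;			/* SCHED_TUNABLESCALING_LINEAR */

	/* a 6ms normalized latency becomes 6ms, 24ms or 48ms respectively */
	printf("none: %ums  log: %ums  linear: %ums\n",
	       6 * none, 6 * log, 6 * linear);
	return 0;
}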
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
#if defined(CONFIG_ZEN_DEFAULT)
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
#elif defined(CONFIG_ZEN_CUSTOM)
unsigned int sysctl_sched_min_granularity = sysctl_sched_min_granularity_custom;
unsigned int normalized_sysctl_sched_min_granularity = normalized_sysctl_sched_min_granularity_custom;
#endif
/*
 * sched_nr_latency is kept at
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
#if defined(CONFIG_ZEN_DEFAULT)
static unsigned int sched_nr_latency = 8;
#elif defined(CONFIG_ZEN_CUSTOM)
static unsigned int sched_nr_latency = sched_nr_latency_custom;
#endif
/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
/*
 * The exponential sliding window over which load is averaged for shares
 * distribution. (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}
#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
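/*
 * Editor's illustrative sketch (not part of the original file): SRR() is a
 * shift-right that rounds to nearest instead of truncating, by adding half
 * of the divisor (1 << (y-1)) before shifting. Small userspace demo:
 */
#include <stdio.h>

#define DEMO_SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

int main(void)
{
	/* dividing by 4 (shift of 2): plain truncation would give 1, 1 and 1 */
	printf("%lu %lu %lu\n",
	       DEMO_SRR(5UL, 2),	/* (5 + 2) >> 2 = 1 */
	       DEMO_SRR(6UL, 2),	/* (6 + 2) >> 2 = 2 */
	       DEMO_SRR(7UL, 2));	/* (7 + 2) >> 2 = 2 */
	return 0;
}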
/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
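/*
 * Editor's worked example (not part of the original file), ignoring the
 * SCHED_LOAD_RESOLUTION scaling for simplicity: on 64-bit, WMULT_CONST is
 * 2^32, so for a queue weight of 2048 the cached inverse weight is
 * 2^32 / 2048 = 2^21 and
 *
 *	delta * weight / lw  ~=  SRR(delta * weight * inv_weight, 32)
 *
 * With delta_exec = 1000000 (1ms), weight = 1024 (NICE_0_LOAD) and
 * lw->weight = 2048 (two nice-0 tasks on the queue):
 *
 *	tmp = 1000000 * 1024, and (tmp * 2^21) >> 32  ~=  500000
 *
 * i.e. the result is delta scaled by this entity's share of the total queue
 * weight, computed with one multiply and shifts instead of a 64-bit divide.
 */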
const struct sched_class fair_sched_class;
235 /**************************************************************
236 * CFS operations on generic schedulable entities:
239 #ifdef CONFIG_FAIR_GROUP_SCHED
241 /* cpu runqueue to which this cfs_rq is attached */
242 static inline struct rq
*rq_of(struct cfs_rq
*cfs_rq
)
247 /* An entity is a task if it doesn't "own" a runqueue */
248 #define entity_is_task(se) (!se->my_q)
250 static inline struct task_struct
*task_of(struct sched_entity
*se
)
252 #ifdef CONFIG_SCHED_DEBUG
253 WARN_ON_ONCE(!entity_is_task(se
));
255 return container_of(se
, struct task_struct
, se
);
258 /* Walk up scheduling entities hierarchy */
259 #define for_each_sched_entity(se) \
260 for (; se; se = se->parent)
262 static inline struct cfs_rq
*task_cfs_rq(struct task_struct
*p
)
267 /* runqueue on which this entity is (to be) queued */
268 static inline struct cfs_rq
*cfs_rq_of(struct sched_entity
*se
)
273 /* runqueue "owned" by this group */
274 static inline struct cfs_rq
*group_cfs_rq(struct sched_entity
*grp
)
279 static inline void list_add_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
281 if (!cfs_rq
->on_list
) {
283 * Ensure we either appear before our parent (if already
284 * enqueued) or force our parent to appear after us when it is
285 * enqueued. The fact that we always enqueue bottom-up
286 * reduces this to two cases.
288 if (cfs_rq
->tg
->parent
&&
289 cfs_rq
->tg
->parent
->cfs_rq
[cpu_of(rq_of(cfs_rq
))]->on_list
) {
290 list_add_rcu(&cfs_rq
->leaf_cfs_rq_list
,
291 &rq_of(cfs_rq
)->leaf_cfs_rq_list
);
293 list_add_tail_rcu(&cfs_rq
->leaf_cfs_rq_list
,
294 &rq_of(cfs_rq
)->leaf_cfs_rq_list
);
301 static inline void list_del_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
303 if (cfs_rq
->on_list
) {
304 list_del_rcu(&cfs_rq
->leaf_cfs_rq_list
);
/* Iterate through all leaf cfs_rq's on a runqueue */
310 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
311 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
313 /* Do the two (enqueued) entities belong to the same group ? */
315 is_same_group(struct sched_entity
*se
, struct sched_entity
*pse
)
317 if (se
->cfs_rq
== pse
->cfs_rq
)
323 static inline struct sched_entity
*parent_entity(struct sched_entity
*se
)
328 /* return depth at which a sched entity is present in the hierarchy */
329 static inline int depth_se(struct sched_entity
*se
)
333 for_each_sched_entity(se
)
340 find_matching_se(struct sched_entity
**se
, struct sched_entity
**pse
)
342 int se_depth
, pse_depth
;
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq, i.e. who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of a
	 * common parent.
351 /* First walk up until both entities are at same depth */
352 se_depth
= depth_se(*se
);
353 pse_depth
= depth_se(*pse
);
355 while (se_depth
> pse_depth
) {
357 *se
= parent_entity(*se
);
360 while (pse_depth
> se_depth
) {
362 *pse
= parent_entity(*pse
);
365 while (!is_same_group(*se
, *pse
)) {
366 *se
= parent_entity(*se
);
367 *pse
= parent_entity(*pse
);
371 #else /* !CONFIG_FAIR_GROUP_SCHED */
373 static inline struct task_struct
*task_of(struct sched_entity
*se
)
375 return container_of(se
, struct task_struct
, se
);
378 static inline struct rq
*rq_of(struct cfs_rq
*cfs_rq
)
380 return container_of(cfs_rq
, struct rq
, cfs
);
383 #define entity_is_task(se) 1
385 #define for_each_sched_entity(se) \
386 for (; se; se = NULL)
388 static inline struct cfs_rq
*task_cfs_rq(struct task_struct
*p
)
390 return &task_rq(p
)->cfs
;
393 static inline struct cfs_rq
*cfs_rq_of(struct sched_entity
*se
)
395 struct task_struct
*p
= task_of(se
);
396 struct rq
*rq
= task_rq(p
);
401 /* runqueue "owned" by this group */
402 static inline struct cfs_rq
*group_cfs_rq(struct sched_entity
*grp
)
407 static inline void list_add_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
411 static inline void list_del_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
415 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
416 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
419 is_same_group(struct sched_entity
*se
, struct sched_entity
*pse
)
424 static inline struct sched_entity
*parent_entity(struct sched_entity
*se
)
430 find_matching_se(struct sched_entity
**se
, struct sched_entity
**pse
)
434 #endif /* CONFIG_FAIR_GROUP_SCHED */
436 static void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
437 unsigned long delta_exec
);
439 /**************************************************************
440 * Scheduling class tree data structure manipulation methods:
443 static inline u64
max_vruntime(u64 min_vruntime
, u64 vruntime
)
445 s64 delta
= (s64
)(vruntime
- min_vruntime
);
447 min_vruntime
= vruntime
;
452 static inline u64
min_vruntime(u64 min_vruntime
, u64 vruntime
)
454 s64 delta
= (s64
)(vruntime
- min_vruntime
);
456 min_vruntime
= vruntime
;
461 static inline int entity_before(struct sched_entity
*a
,
462 struct sched_entity
*b
)
464 return (s64
)(a
->vruntime
- b
->vruntime
) < 0;
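/*
 * Editor's illustrative sketch (not part of the original file): the helpers
 * above compare vruntimes through a signed 64-bit difference rather than a
 * plain "a < b", so the ordering stays correct even after the u64 vruntime
 * wraps around. A tiny userspace demonstration:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = UINT64_MAX - 100;	/* just before the wrap     */
	uint64_t b = a + 200;		/* 200 "later", wraps to 99 */

	/* naive comparison says a is NOT before b; the signed delta says it is */
	printf("naive: %d  signed-delta: %d\n",
	       a < b, (int64_t)(a - b) < 0);
	return 0;
}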
467 static void update_min_vruntime(struct cfs_rq
*cfs_rq
)
469 u64 vruntime
= cfs_rq
->min_vruntime
;
472 vruntime
= cfs_rq
->curr
->vruntime
;
474 if (cfs_rq
->rb_leftmost
) {
475 struct sched_entity
*se
= rb_entry(cfs_rq
->rb_leftmost
,
480 vruntime
= se
->vruntime
;
482 vruntime
= min_vruntime(vruntime
, se
->vruntime
);
485 cfs_rq
->min_vruntime
= max_vruntime(cfs_rq
->min_vruntime
, vruntime
);
488 cfs_rq
->min_vruntime_copy
= cfs_rq
->min_vruntime
;
493 * Enqueue an entity into the rb-tree:
495 static void __enqueue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
497 struct rb_node
**link
= &cfs_rq
->tasks_timeline
.rb_node
;
498 struct rb_node
*parent
= NULL
;
499 struct sched_entity
*entry
;
503 * Find the right place in the rbtree:
507 entry
= rb_entry(parent
, struct sched_entity
, run_node
);
	 * We don't care about collisions. Nodes with
	 * the same key stay together.
512 if (entity_before(se
, entry
)) {
513 link
= &parent
->rb_left
;
515 link
= &parent
->rb_right
;
	 * Maintain a cache of leftmost tree entries (it is frequently used):
525 cfs_rq
->rb_leftmost
= &se
->run_node
;
527 rb_link_node(&se
->run_node
, parent
, link
);
528 rb_insert_color(&se
->run_node
, &cfs_rq
->tasks_timeline
);
531 static void __dequeue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
533 if (cfs_rq
->rb_leftmost
== &se
->run_node
) {
534 struct rb_node
*next_node
;
536 next_node
= rb_next(&se
->run_node
);
537 cfs_rq
->rb_leftmost
= next_node
;
540 rb_erase(&se
->run_node
, &cfs_rq
->tasks_timeline
);
543 struct sched_entity
*__pick_first_entity(struct cfs_rq
*cfs_rq
)
545 struct rb_node
*left
= cfs_rq
->rb_leftmost
;
550 return rb_entry(left
, struct sched_entity
, run_node
);
553 static struct sched_entity
*__pick_next_entity(struct sched_entity
*se
)
555 struct rb_node
*next
= rb_next(&se
->run_node
);
560 return rb_entry(next
, struct sched_entity
, run_node
);
563 #ifdef CONFIG_SCHED_DEBUG
564 struct sched_entity
*__pick_last_entity(struct cfs_rq
*cfs_rq
)
566 struct rb_node
*last
= rb_last(&cfs_rq
->tasks_timeline
);
571 return rb_entry(last
, struct sched_entity
, run_node
);
574 /**************************************************************
575 * Scheduling class statistics methods:
578 int sched_proc_update_handler(struct ctl_table
*table
, int write
,
579 void __user
*buffer
, size_t *lenp
,
582 int ret
= proc_dointvec_minmax(table
, write
, buffer
, lenp
, ppos
);
583 int factor
= get_update_sysctl_factor();
588 sched_nr_latency
= DIV_ROUND_UP(sysctl_sched_latency
,
589 sysctl_sched_min_granularity
);
591 #define WRT_SYSCTL(name) \
592 (normalized_sysctl_##name = sysctl_##name / (factor))
593 WRT_SYSCTL(sched_min_granularity
);
594 WRT_SYSCTL(sched_latency
);
595 WRT_SYSCTL(sched_wakeup_granularity
);
605 static inline unsigned long
606 calc_delta_fair(unsigned long delta
, struct sched_entity
*se
)
608 if (unlikely(se
->load
.weight
!= NICE_0_LOAD
))
609 delta
= calc_delta_mine(delta
, NICE_0_LOAD
, &se
->load
);
615 * The idea is to set a period in which each task runs once.
617 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
618 * this period because otherwise the slices get too small.
620 * p = (nr <= nl) ? l : l*nr/nl
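/*
 * Editor's worked example (not part of the original file), using the defaults
 * above (l = sysctl_sched_latency = 6ms, nl = sched_nr_latency = 8,
 * sysctl_sched_min_granularity = 0.75ms):
 *
 *	nr_running =  4:  p = l = 6ms              (each task ~1.5ms per period)
 *	nr_running =  8:  p = l = 6ms              (each task ~0.75ms)
 *	nr_running = 16:  p = l*nr/nl = 0.75ms * 16 = 12ms
 *
 * i.e. once more than sched_nr_latency tasks are runnable the period is
 * stretched so that no slice drops below the minimum granularity.
 */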
622 static u64
__sched_period(unsigned long nr_running
)
624 u64 period
= sysctl_sched_latency
;
625 unsigned long nr_latency
= sched_nr_latency
;
627 if (unlikely(nr_running
> nr_latency
)) {
628 period
= sysctl_sched_min_granularity
;
629 period
*= nr_running
;
636 * We calculate the wall-time slice from the period by taking a part
637 * proportional to the weight.
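/*
 * Editor's worked example (not part of the original file): with two runnable
 * tasks the period is 6ms. A nice-0 entity (weight 1024) sharing the queue
 * with a nice-5 entity (weight 335) gets
 *
 *	slice = 6ms * 1024 / (1024 + 335)  ~=  4.5ms
 *
 * while the nice-5 entity gets the remaining ~1.5ms: wall-clock slices follow
 * the weights, even though both entities advance their (weighted) vruntime at
 * the same rate while running.
 */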
641 static u64
sched_slice(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
643 u64 slice
= __sched_period(cfs_rq
->nr_running
+ !se
->on_rq
);
645 for_each_sched_entity(se
) {
646 struct load_weight
*load
;
647 struct load_weight lw
;
649 cfs_rq
= cfs_rq_of(se
);
650 load
= &cfs_rq
->load
;
652 if (unlikely(!se
->on_rq
)) {
655 update_load_add(&lw
, se
->load
.weight
);
658 slice
= calc_delta_mine(slice
, se
->load
.weight
, load
);
 * We calculate the vruntime slice of a to-be-inserted task.
668 static u64
sched_vslice(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
670 return calc_delta_fair(sched_slice(cfs_rq
, se
), se
);
673 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
);
674 static void update_cfs_shares(struct cfs_rq
*cfs_rq
);
677 * Update the current task's runtime statistics. Skip current tasks that
678 * are not in our scheduling class.
681 __update_curr(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
,
682 unsigned long delta_exec
)
684 unsigned long delta_exec_weighted
;
686 schedstat_set(curr
->statistics
.exec_max
,
687 max((u64
)delta_exec
, curr
->statistics
.exec_max
));
689 curr
->sum_exec_runtime
+= delta_exec
;
690 schedstat_add(cfs_rq
, exec_clock
, delta_exec
);
691 delta_exec_weighted
= calc_delta_fair(delta_exec
, curr
);
693 curr
->vruntime
+= delta_exec_weighted
;
694 update_min_vruntime(cfs_rq
);
696 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
697 cfs_rq
->load_unacc_exec_time
+= delta_exec
;
701 static void update_curr(struct cfs_rq
*cfs_rq
)
703 struct sched_entity
*curr
= cfs_rq
->curr
;
704 u64 now
= rq_of(cfs_rq
)->clock_task
;
705 unsigned long delta_exec
;
711 * Get the amount of time the current task was running
712 * since the last time we changed load (this cannot
713 * overflow on 32 bits):
715 delta_exec
= (unsigned long)(now
- curr
->exec_start
);
719 __update_curr(cfs_rq
, curr
, delta_exec
);
720 curr
->exec_start
= now
;
722 if (entity_is_task(curr
)) {
723 struct task_struct
*curtask
= task_of(curr
);
725 trace_sched_stat_runtime(curtask
, delta_exec
, curr
->vruntime
);
726 cpuacct_charge(curtask
, delta_exec
);
727 account_group_exec_runtime(curtask
, delta_exec
);
730 account_cfs_rq_runtime(cfs_rq
, delta_exec
);
734 update_stats_wait_start(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
736 schedstat_set(se
->statistics
.wait_start
, rq_of(cfs_rq
)->clock
);
740 * Task is being enqueued - update stats:
742 static void update_stats_enqueue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
745 * Are we enqueueing a waiting task? (for current tasks
746 * a dequeue/enqueue event is a NOP)
748 if (se
!= cfs_rq
->curr
)
749 update_stats_wait_start(cfs_rq
, se
);
753 update_stats_wait_end(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
755 schedstat_set(se
->statistics
.wait_max
, max(se
->statistics
.wait_max
,
756 rq_of(cfs_rq
)->clock
- se
->statistics
.wait_start
));
757 schedstat_set(se
->statistics
.wait_count
, se
->statistics
.wait_count
+ 1);
758 schedstat_set(se
->statistics
.wait_sum
, se
->statistics
.wait_sum
+
759 rq_of(cfs_rq
)->clock
- se
->statistics
.wait_start
);
760 #ifdef CONFIG_SCHEDSTATS
761 if (entity_is_task(se
)) {
762 trace_sched_stat_wait(task_of(se
),
763 rq_of(cfs_rq
)->clock
- se
->statistics
.wait_start
);
766 schedstat_set(se
->statistics
.wait_start
, 0);
770 update_stats_dequeue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
	 * Mark the end of the wait period if dequeueing a waiting task:
776 if (se
!= cfs_rq
->curr
)
777 update_stats_wait_end(cfs_rq
, se
);
781 * We are picking a new current task - update its stats:
784 update_stats_curr_start(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
787 * We are starting a new run period:
789 se
->exec_start
= rq_of(cfs_rq
)->clock_task
;
792 /**************************************************
793 * Scheduling class queueing methods:
796 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
798 add_cfs_task_weight(struct cfs_rq
*cfs_rq
, unsigned long weight
)
800 cfs_rq
->task_weight
+= weight
;
804 add_cfs_task_weight(struct cfs_rq
*cfs_rq
, unsigned long weight
)
810 account_entity_enqueue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
812 update_load_add(&cfs_rq
->load
, se
->load
.weight
);
813 if (!parent_entity(se
))
814 update_load_add(&rq_of(cfs_rq
)->load
, se
->load
.weight
);
815 if (entity_is_task(se
)) {
816 add_cfs_task_weight(cfs_rq
, se
->load
.weight
);
817 list_add(&se
->group_node
, &cfs_rq
->tasks
);
819 cfs_rq
->nr_running
++;
823 account_entity_dequeue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
825 update_load_sub(&cfs_rq
->load
, se
->load
.weight
);
826 if (!parent_entity(se
))
827 update_load_sub(&rq_of(cfs_rq
)->load
, se
->load
.weight
);
828 if (entity_is_task(se
)) {
829 add_cfs_task_weight(cfs_rq
, -se
->load
.weight
);
830 list_del_init(&se
->group_node
);
832 cfs_rq
->nr_running
--;
835 #ifdef CONFIG_FAIR_GROUP_SCHED
836 /* we need this in update_cfs_load and load-balance functions below */
837 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
);
839 static void update_cfs_rq_load_contribution(struct cfs_rq
*cfs_rq
,
842 struct task_group
*tg
= cfs_rq
->tg
;
845 load_avg
= div64_u64(cfs_rq
->load_avg
, cfs_rq
->load_period
+1);
846 load_avg
-= cfs_rq
->load_contribution
;
848 if (global_update
|| abs(load_avg
) > cfs_rq
->load_contribution
/ 8) {
849 atomic_add(load_avg
, &tg
->load_weight
);
850 cfs_rq
->load_contribution
+= load_avg
;
854 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
)
856 u64 period
= sysctl_sched_shares_window
;
858 unsigned long load
= cfs_rq
->load
.weight
;
860 if (cfs_rq
->tg
== &root_task_group
|| throttled_hierarchy(cfs_rq
))
863 now
= rq_of(cfs_rq
)->clock_task
;
864 delta
= now
- cfs_rq
->load_stamp
;
866 /* truncate load history at 4 idle periods */
867 if (cfs_rq
->load_stamp
> cfs_rq
->load_last
&&
868 now
- cfs_rq
->load_last
> 4 * period
) {
869 cfs_rq
->load_period
= 0;
870 cfs_rq
->load_avg
= 0;
874 cfs_rq
->load_stamp
= now
;
875 cfs_rq
->load_unacc_exec_time
= 0;
876 cfs_rq
->load_period
+= delta
;
878 cfs_rq
->load_last
= now
;
879 cfs_rq
->load_avg
+= delta
* load
;
882 /* consider updating load contribution on each fold or truncate */
883 if (global_update
|| cfs_rq
->load_period
> period
884 || !cfs_rq
->load_period
)
885 update_cfs_rq_load_contribution(cfs_rq
, global_update
);
887 while (cfs_rq
->load_period
> period
) {
889 * Inline assembly required to prevent the compiler
890 * optimising this loop into a divmod call.
891 * See __iter_div_u64_rem() for another example of this.
893 asm("" : "+rm" (cfs_rq
->load_period
));
894 cfs_rq
->load_period
/= 2;
895 cfs_rq
->load_avg
/= 2;
898 if (!cfs_rq
->curr
&& !cfs_rq
->nr_running
&& !cfs_rq
->load_avg
)
899 list_del_leaf_cfs_rq(cfs_rq
);
902 static inline long calc_tg_weight(struct task_group
*tg
, struct cfs_rq
*cfs_rq
)
907 * Use this CPU's actual weight instead of the last load_contribution
908 * to gain a more accurate current total weight. See
909 * update_cfs_rq_load_contribution().
911 tg_weight
= atomic_read(&tg
->load_weight
);
912 tg_weight
-= cfs_rq
->load_contribution
;
913 tg_weight
+= cfs_rq
->load
.weight
;
918 static long calc_cfs_shares(struct cfs_rq
*cfs_rq
, struct task_group
*tg
)
920 long tg_weight
, load
, shares
;
922 tg_weight
= calc_tg_weight(tg
, cfs_rq
);
923 load
= cfs_rq
->load
.weight
;
925 shares
= (tg
->shares
* load
);
929 if (shares
< MIN_SHARES
)
931 if (shares
> tg
->shares
)
937 static void update_entity_shares_tick(struct cfs_rq
*cfs_rq
)
939 if (cfs_rq
->load_unacc_exec_time
> sysctl_sched_shares_window
) {
940 update_cfs_load(cfs_rq
, 0);
941 update_cfs_shares(cfs_rq
);
944 # else /* CONFIG_SMP */
945 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
)
949 static inline long calc_cfs_shares(struct cfs_rq
*cfs_rq
, struct task_group
*tg
)
954 static inline void update_entity_shares_tick(struct cfs_rq
*cfs_rq
)
957 # endif /* CONFIG_SMP */
958 static void reweight_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
,
959 unsigned long weight
)
962 /* commit outstanding execution time */
963 if (cfs_rq
->curr
== se
)
965 account_entity_dequeue(cfs_rq
, se
);
968 update_load_set(&se
->load
, weight
);
971 account_entity_enqueue(cfs_rq
, se
);
974 static void update_cfs_shares(struct cfs_rq
*cfs_rq
)
976 struct task_group
*tg
;
977 struct sched_entity
*se
;
981 se
= tg
->se
[cpu_of(rq_of(cfs_rq
))];
982 if (!se
|| throttled_hierarchy(cfs_rq
))
985 if (likely(se
->load
.weight
== tg
->shares
))
988 shares
= calc_cfs_shares(cfs_rq
, tg
);
990 reweight_entity(cfs_rq_of(se
), se
, shares
);
992 #else /* CONFIG_FAIR_GROUP_SCHED */
993 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
)
997 static inline void update_cfs_shares(struct cfs_rq
*cfs_rq
)
1001 static inline void update_entity_shares_tick(struct cfs_rq
*cfs_rq
)
1004 #endif /* CONFIG_FAIR_GROUP_SCHED */
1006 static void enqueue_sleeper(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1008 #ifdef CONFIG_SCHEDSTATS
1009 struct task_struct
*tsk
= NULL
;
1011 if (entity_is_task(se
))
1014 if (se
->statistics
.sleep_start
) {
1015 u64 delta
= rq_of(cfs_rq
)->clock
- se
->statistics
.sleep_start
;
1020 if (unlikely(delta
> se
->statistics
.sleep_max
))
1021 se
->statistics
.sleep_max
= delta
;
1023 se
->statistics
.sleep_start
= 0;
1024 se
->statistics
.sum_sleep_runtime
+= delta
;
1027 account_scheduler_latency(tsk
, delta
>> 10, 1);
1028 trace_sched_stat_sleep(tsk
, delta
);
1031 if (se
->statistics
.block_start
) {
1032 u64 delta
= rq_of(cfs_rq
)->clock
- se
->statistics
.block_start
;
1037 if (unlikely(delta
> se
->statistics
.block_max
))
1038 se
->statistics
.block_max
= delta
;
1040 se
->statistics
.block_start
= 0;
1041 se
->statistics
.sum_sleep_runtime
+= delta
;
1044 if (tsk
->in_iowait
) {
1045 se
->statistics
.iowait_sum
+= delta
;
1046 se
->statistics
.iowait_count
++;
1047 trace_sched_stat_iowait(tsk
, delta
);
1050 trace_sched_stat_blocked(tsk
, delta
);
1053 * Blocking time is in units of nanosecs, so shift by
1054 * 20 to get a milliseconds-range estimation of the
1055 * amount of time that the task spent sleeping:
1057 if (unlikely(prof_on
== SLEEP_PROFILING
)) {
1058 profile_hits(SLEEP_PROFILING
,
1059 (void *)get_wchan(tsk
),
1062 account_scheduler_latency(tsk
, delta
>> 10, 0);
1068 static void check_spread(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1070 #ifdef CONFIG_SCHED_DEBUG
1071 s64 d
= se
->vruntime
- cfs_rq
->min_vruntime
;
1076 if (d
> 3*sysctl_sched_latency
)
1077 schedstat_inc(cfs_rq
, nr_spread_over
);
1082 place_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int initial
)
1084 u64 vruntime
= cfs_rq
->min_vruntime
;
1087 * The 'current' period is already promised to the current tasks,
1088 * however the extra weight of the new task will slow them down a
1089 * little, place the new task so that it fits in the slot that
1090 * stays open at the end.
1092 if (initial
&& sched_feat(START_DEBIT
))
1093 vruntime
+= sched_vslice(cfs_rq
, se
);
1095 /* sleeps up to a single latency don't count. */
1097 unsigned long thresh
= sysctl_sched_latency
;
1100 * Halve their sleep time's effect, to allow
1101 * for a gentler effect of sleepers:
1103 if (sched_feat(GENTLE_FAIR_SLEEPERS
))
1109 /* ensure we never gain time by being placed backwards. */
1110 vruntime
= max_vruntime(se
->vruntime
, vruntime
);
1112 se
->vruntime
= vruntime
;
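/*
 * Editor's worked example (not part of the original file), assuming the
 * default tunables and feature bits: a newly forked entity (initial &&
 * START_DEBIT) is placed one vslice *after* min_vruntime, so it cannot
 * immediately preempt the tasks already running; an entity waking from sleep
 * is placed up to one latency (6ms, halved to 3ms with GENTLE_FAIR_SLEEPERS)
 * *before* min_vruntime, giving it a bounded preemption credit. The final
 * max_vruntime() keeps an entity that is already ahead at its own vruntime,
 * so sleeping can never be used to gain service.
 */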
1115 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
);
1118 enqueue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
1121 * Update the normalized vruntime before updating min_vruntime
	 * through calling update_curr().
1124 if (!(flags
& ENQUEUE_WAKEUP
) || (flags
& ENQUEUE_WAKING
))
1125 se
->vruntime
+= cfs_rq
->min_vruntime
;
1128 * Update run-time statistics of the 'current'.
1130 update_curr(cfs_rq
);
1131 update_cfs_load(cfs_rq
, 0);
1132 account_entity_enqueue(cfs_rq
, se
);
1133 update_cfs_shares(cfs_rq
);
1135 if (flags
& ENQUEUE_WAKEUP
) {
1136 place_entity(cfs_rq
, se
, 0);
1137 enqueue_sleeper(cfs_rq
, se
);
1140 update_stats_enqueue(cfs_rq
, se
);
1141 check_spread(cfs_rq
, se
);
1142 if (se
!= cfs_rq
->curr
)
1143 __enqueue_entity(cfs_rq
, se
);
1146 if (cfs_rq
->nr_running
== 1) {
1147 list_add_leaf_cfs_rq(cfs_rq
);
1148 check_enqueue_throttle(cfs_rq
);
1152 static void __clear_buddies_last(struct sched_entity
*se
)
1154 for_each_sched_entity(se
) {
1155 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1156 if (cfs_rq
->last
== se
)
1157 cfs_rq
->last
= NULL
;
1163 static void __clear_buddies_next(struct sched_entity
*se
)
1165 for_each_sched_entity(se
) {
1166 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1167 if (cfs_rq
->next
== se
)
1168 cfs_rq
->next
= NULL
;
1174 static void __clear_buddies_skip(struct sched_entity
*se
)
1176 for_each_sched_entity(se
) {
1177 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1178 if (cfs_rq
->skip
== se
)
1179 cfs_rq
->skip
= NULL
;
1185 static void clear_buddies(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1187 if (cfs_rq
->last
== se
)
1188 __clear_buddies_last(se
);
1190 if (cfs_rq
->next
== se
)
1191 __clear_buddies_next(se
);
1193 if (cfs_rq
->skip
== se
)
1194 __clear_buddies_skip(se
);
1197 static void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
1200 dequeue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
1203 * Update run-time statistics of the 'current'.
1205 update_curr(cfs_rq
);
1207 update_stats_dequeue(cfs_rq
, se
);
1208 if (flags
& DEQUEUE_SLEEP
) {
1209 #ifdef CONFIG_SCHEDSTATS
1210 if (entity_is_task(se
)) {
1211 struct task_struct
*tsk
= task_of(se
);
1213 if (tsk
->state
& TASK_INTERRUPTIBLE
)
1214 se
->statistics
.sleep_start
= rq_of(cfs_rq
)->clock
;
1215 if (tsk
->state
& TASK_UNINTERRUPTIBLE
)
1216 se
->statistics
.block_start
= rq_of(cfs_rq
)->clock
;
1221 clear_buddies(cfs_rq
, se
);
1223 if (se
!= cfs_rq
->curr
)
1224 __dequeue_entity(cfs_rq
, se
);
1226 update_cfs_load(cfs_rq
, 0);
1227 account_entity_dequeue(cfs_rq
, se
);
1230 * Normalize the entity after updating the min_vruntime because the
1231 * update can refer to the ->curr item and we need to reflect this
1232 * movement in our normalized position.
1234 if (!(flags
& DEQUEUE_SLEEP
))
1235 se
->vruntime
-= cfs_rq
->min_vruntime
;
1237 /* return excess runtime on last dequeue */
1238 return_cfs_rq_runtime(cfs_rq
);
1240 update_min_vruntime(cfs_rq
);
1241 update_cfs_shares(cfs_rq
);
1245 * Preempt the current task with a newly woken task if needed:
1248 check_preempt_tick(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
)
1250 unsigned long ideal_runtime
, delta_exec
;
1251 struct sched_entity
*se
;
1254 ideal_runtime
= sched_slice(cfs_rq
, curr
);
1255 delta_exec
= curr
->sum_exec_runtime
- curr
->prev_sum_exec_runtime
;
1256 if (delta_exec
> ideal_runtime
) {
1257 resched_task(rq_of(cfs_rq
)->curr
);
1259 * The current task ran long enough, ensure it doesn't get
1260 * re-elected due to buddy favours.
1262 clear_buddies(cfs_rq
, curr
);
1267 * Ensure that a task that missed wakeup preemption by a
1268 * narrow margin doesn't have to wait for a full slice.
1269 * This also mitigates buddy induced latencies under load.
1271 if (delta_exec
< sysctl_sched_min_granularity
)
1274 se
= __pick_first_entity(cfs_rq
);
1275 delta
= curr
->vruntime
- se
->vruntime
;
1280 if (delta
> ideal_runtime
)
1281 resched_task(rq_of(cfs_rq
)->curr
);
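/*
 * Editor's worked example (not part of the original file): with two nice-0
 * tasks the ideal runtime is ~3ms. The current task is rescheduled once it
 * has run 3ms beyond prev_sum_exec_runtime; below the 0.75ms minimum
 * granularity it is never preempted here; in between, it is preempted only
 * when its vruntime leads the leftmost entity by more than the ideal runtime,
 * which catches tasks that narrowly missed wakeup preemption without causing
 * over-scheduling.
 */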
1285 set_next_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1287 /* 'current' is not kept within the tree. */
	 * Any task has to be enqueued before it gets to execute on
	 * a CPU. So account for the time it spent waiting on the
	 * runqueue.
1294 update_stats_wait_end(cfs_rq
, se
);
1295 __dequeue_entity(cfs_rq
, se
);
1298 update_stats_curr_start(cfs_rq
, se
);
1300 #ifdef CONFIG_SCHEDSTATS
1302 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
1304 * when there are only lesser-weight tasks around):
1306 if (rq_of(cfs_rq
)->load
.weight
>= 2*se
->load
.weight
) {
1307 se
->statistics
.slice_max
= max(se
->statistics
.slice_max
,
1308 se
->sum_exec_runtime
- se
->prev_sum_exec_runtime
);
1311 se
->prev_sum_exec_runtime
= se
->sum_exec_runtime
;
1315 wakeup_preempt_entity(struct sched_entity
*curr
, struct sched_entity
*se
);
1318 * Pick the next process, keeping these things in mind, in this order:
1319 * 1) keep things fair between processes/task groups
1320 * 2) pick the "next" process, since someone really wants that to run
1321 * 3) pick the "last" process, for cache locality
1322 * 4) do not run the "skip" process, if something else is available
1324 static struct sched_entity
*pick_next_entity(struct cfs_rq
*cfs_rq
)
1326 struct sched_entity
*se
= __pick_first_entity(cfs_rq
);
1327 struct sched_entity
*left
= se
;
1330 * Avoid running the skip buddy, if running something else can
1331 * be done without getting too unfair.
1333 if (cfs_rq
->skip
== se
) {
1334 struct sched_entity
*second
= __pick_next_entity(se
);
1335 if (second
&& wakeup_preempt_entity(second
, left
) < 1)
1340 * Prefer last buddy, try to return the CPU to a preempted task.
1342 if (cfs_rq
->last
&& wakeup_preempt_entity(cfs_rq
->last
, left
) < 1)
1346 * Someone really wants this to run. If it's not unfair, run it.
1348 if (cfs_rq
->next
&& wakeup_preempt_entity(cfs_rq
->next
, left
) < 1)
1351 clear_buddies(cfs_rq
, se
);
1356 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
1358 static void put_prev_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*prev
)
1361 * If still on the runqueue then deactivate_task()
1362 * was not called and update_curr() has to be done:
1365 update_curr(cfs_rq
);
1367 /* throttle cfs_rqs exceeding runtime */
1368 check_cfs_rq_runtime(cfs_rq
);
1370 check_spread(cfs_rq
, prev
);
1372 update_stats_wait_start(cfs_rq
, prev
);
1373 /* Put 'current' back into the tree. */
1374 __enqueue_entity(cfs_rq
, prev
);
1376 cfs_rq
->curr
= NULL
;
1380 entity_tick(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
, int queued
)
1383 * Update run-time statistics of the 'current'.
1385 update_curr(cfs_rq
);
1388 * Update share accounting for long-running entities.
1390 update_entity_shares_tick(cfs_rq
);
1392 #ifdef CONFIG_SCHED_HRTICK
1394 * queued ticks are scheduled to match the slice, so don't bother
1395 * validating it and just reschedule.
1398 resched_task(rq_of(cfs_rq
)->curr
);
1402 * don't let the period tick interfere with the hrtick preemption
1404 if (!sched_feat(DOUBLE_TICK
) &&
1405 hrtimer_active(&rq_of(cfs_rq
)->hrtick_timer
))
1409 if (cfs_rq
->nr_running
> 1)
1410 check_preempt_tick(cfs_rq
, curr
);
1414 /**************************************************
1415 * CFS bandwidth control machinery
1418 #ifdef CONFIG_CFS_BANDWIDTH
1420 #ifdef HAVE_JUMP_LABEL
1421 static struct jump_label_key __cfs_bandwidth_used
;
1423 static inline bool cfs_bandwidth_used(void)
1425 return static_branch(&__cfs_bandwidth_used
);
1428 void account_cfs_bandwidth_used(int enabled
, int was_enabled
)
1430 /* only need to count groups transitioning between enabled/!enabled */
1431 if (enabled
&& !was_enabled
)
1432 jump_label_inc(&__cfs_bandwidth_used
);
1433 else if (!enabled
&& was_enabled
)
1434 jump_label_dec(&__cfs_bandwidth_used
);
1436 #else /* HAVE_JUMP_LABEL */
1437 static bool cfs_bandwidth_used(void)
1442 void account_cfs_bandwidth_used(int enabled
, int was_enabled
) {}
1443 #endif /* HAVE_JUMP_LABEL */
1446 * default period for cfs group bandwidth.
1447 * default: 0.1s, units: nanoseconds
1449 static inline u64
default_cfs_period(void)
1451 return 100000000ULL;
1454 static inline u64
sched_cfs_bandwidth_slice(void)
1456 return (u64
)sysctl_sched_cfs_bandwidth_slice
* NSEC_PER_USEC
;
1460 * Replenish runtime according to assigned quota and update expiration time.
1461 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
1462 * additional synchronization around rq->lock.
1464 * requires cfs_b->lock
1466 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth
*cfs_b
)
1470 if (cfs_b
->quota
== RUNTIME_INF
)
1473 now
= sched_clock_cpu(smp_processor_id());
1474 cfs_b
->runtime
= cfs_b
->quota
;
1475 cfs_b
->runtime_expires
= now
+ ktime_to_ns(cfs_b
->period
);
1478 static inline struct cfs_bandwidth
*tg_cfs_bandwidth(struct task_group
*tg
)
1480 return &tg
->cfs_bandwidth
;
1483 /* returns 0 on failure to allocate runtime */
1484 static int assign_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1486 struct task_group
*tg
= cfs_rq
->tg
;
1487 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(tg
);
1488 u64 amount
= 0, min_amount
, expires
;
1490 /* note: this is a positive sum as runtime_remaining <= 0 */
1491 min_amount
= sched_cfs_bandwidth_slice() - cfs_rq
->runtime_remaining
;
1493 raw_spin_lock(&cfs_b
->lock
);
1494 if (cfs_b
->quota
== RUNTIME_INF
)
1495 amount
= min_amount
;
1498 * If the bandwidth pool has become inactive, then at least one
1499 * period must have elapsed since the last consumption.
		 * Refresh the global state and ensure the bandwidth timer
		 * becomes active.
1503 if (!cfs_b
->timer_active
) {
1504 __refill_cfs_bandwidth_runtime(cfs_b
);
1505 __start_cfs_bandwidth(cfs_b
);
1508 if (cfs_b
->runtime
> 0) {
1509 amount
= min(cfs_b
->runtime
, min_amount
);
1510 cfs_b
->runtime
-= amount
;
1514 expires
= cfs_b
->runtime_expires
;
1515 raw_spin_unlock(&cfs_b
->lock
);
1517 cfs_rq
->runtime_remaining
+= amount
;
1519 * we may have advanced our local expiration to account for allowed
1520 * spread between our sched_clock and the one on which runtime was
1523 if ((s64
)(expires
- cfs_rq
->runtime_expires
) > 0)
1524 cfs_rq
->runtime_expires
= expires
;
1526 return cfs_rq
->runtime_remaining
> 0;
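/*
 * Editor's worked example (not part of the original file), assuming the
 * default 5ms slice and a hypothetical 10ms quota per 100ms period: a cfs_rq
 * whose runtime_remaining has just reached 0 asks for min_amount = 5ms. If
 * the global pool still holds at least 5ms it gets the full slice and
 * cfs_b->runtime drops by 5ms; if only 2ms are left it gets 2ms; once the
 * pool is empty the function returns 0 and the caller forces a resched so
 * the hierarchy can be throttled until the period timer refills the quota.
 */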
1530 * Note: This depends on the synchronization provided by sched_clock and the
1531 * fact that rq->clock snapshots this value.
1533 static void expire_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1535 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1536 struct rq
*rq
= rq_of(cfs_rq
);
1538 /* if the deadline is ahead of our clock, nothing to do */
1539 if (likely((s64
)(rq
->clock
- cfs_rq
->runtime_expires
) < 0))
1542 if (cfs_rq
->runtime_remaining
< 0)
1546 * If the local deadline has passed we have to consider the
1547 * possibility that our sched_clock is 'fast' and the global deadline
1548 * has not truly expired.
	 * Fortunately we can determine whether this is the case by checking
	 * whether the global deadline has advanced.
1554 if ((s64
)(cfs_rq
->runtime_expires
- cfs_b
->runtime_expires
) >= 0) {
1555 /* extend local deadline, drift is bounded above by 2 ticks */
1556 cfs_rq
->runtime_expires
+= TICK_NSEC
;
1558 /* global deadline is ahead, expiration has passed */
1559 cfs_rq
->runtime_remaining
= 0;
1563 static void __account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
1564 unsigned long delta_exec
)
1566 /* dock delta_exec before expiring quota (as it could span periods) */
1567 cfs_rq
->runtime_remaining
-= delta_exec
;
1568 expire_cfs_rq_runtime(cfs_rq
);
1570 if (likely(cfs_rq
->runtime_remaining
> 0))
1574 * if we're unable to extend our runtime we resched so that the active
1575 * hierarchy can be throttled
1577 if (!assign_cfs_rq_runtime(cfs_rq
) && likely(cfs_rq
->curr
))
1578 resched_task(rq_of(cfs_rq
)->curr
);
1581 static __always_inline
void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
1582 unsigned long delta_exec
)
1584 if (!cfs_bandwidth_used() || !cfs_rq
->runtime_enabled
)
1587 __account_cfs_rq_runtime(cfs_rq
, delta_exec
);
1590 static inline int cfs_rq_throttled(struct cfs_rq
*cfs_rq
)
1592 return cfs_bandwidth_used() && cfs_rq
->throttled
;
1595 /* check whether cfs_rq, or any parent, is throttled */
1596 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
)
1598 return cfs_bandwidth_used() && cfs_rq
->throttle_count
;
1602 * Ensure that neither of the group entities corresponding to src_cpu or
1603 * dest_cpu are members of a throttled hierarchy when performing group
1604 * load-balance operations.
1606 static inline int throttled_lb_pair(struct task_group
*tg
,
1607 int src_cpu
, int dest_cpu
)
1609 struct cfs_rq
*src_cfs_rq
, *dest_cfs_rq
;
1611 src_cfs_rq
= tg
->cfs_rq
[src_cpu
];
1612 dest_cfs_rq
= tg
->cfs_rq
[dest_cpu
];
1614 return throttled_hierarchy(src_cfs_rq
) ||
1615 throttled_hierarchy(dest_cfs_rq
);
1618 /* updated child weight may affect parent so we have to do this bottom up */
1619 static int tg_unthrottle_up(struct task_group
*tg
, void *data
)
1621 struct rq
*rq
= data
;
1622 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
1624 cfs_rq
->throttle_count
--;
1626 if (!cfs_rq
->throttle_count
) {
1627 u64 delta
= rq
->clock_task
- cfs_rq
->load_stamp
;
1629 /* leaving throttled state, advance shares averaging windows */
1630 cfs_rq
->load_stamp
+= delta
;
1631 cfs_rq
->load_last
+= delta
;
1633 /* update entity weight now that we are on_rq again */
1634 update_cfs_shares(cfs_rq
);
1641 static int tg_throttle_down(struct task_group
*tg
, void *data
)
1643 struct rq
*rq
= data
;
1644 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
1646 /* group is entering throttled state, record last load */
1647 if (!cfs_rq
->throttle_count
)
1648 update_cfs_load(cfs_rq
, 0);
1649 cfs_rq
->throttle_count
++;
1654 static void throttle_cfs_rq(struct cfs_rq
*cfs_rq
)
1656 struct rq
*rq
= rq_of(cfs_rq
);
1657 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1658 struct sched_entity
*se
;
1659 long task_delta
, dequeue
= 1;
1661 se
= cfs_rq
->tg
->se
[cpu_of(rq_of(cfs_rq
))];
1663 /* account load preceding throttle */
1665 walk_tg_tree_from(cfs_rq
->tg
, tg_throttle_down
, tg_nop
, (void *)rq
);
1668 task_delta
= cfs_rq
->h_nr_running
;
1669 for_each_sched_entity(se
) {
1670 struct cfs_rq
*qcfs_rq
= cfs_rq_of(se
);
1671 /* throttled entity or throttle-on-deactivate */
1676 dequeue_entity(qcfs_rq
, se
, DEQUEUE_SLEEP
);
1677 qcfs_rq
->h_nr_running
-= task_delta
;
1679 if (qcfs_rq
->load
.weight
)
1684 rq
->nr_running
-= task_delta
;
1686 cfs_rq
->throttled
= 1;
1687 cfs_rq
->throttled_timestamp
= rq
->clock
;
1688 raw_spin_lock(&cfs_b
->lock
);
1689 list_add_tail_rcu(&cfs_rq
->throttled_list
, &cfs_b
->throttled_cfs_rq
);
1690 raw_spin_unlock(&cfs_b
->lock
);
1693 void unthrottle_cfs_rq(struct cfs_rq
*cfs_rq
)
1695 struct rq
*rq
= rq_of(cfs_rq
);
1696 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1697 struct sched_entity
*se
;
1701 se
= cfs_rq
->tg
->se
[cpu_of(rq_of(cfs_rq
))];
1703 cfs_rq
->throttled
= 0;
1704 raw_spin_lock(&cfs_b
->lock
);
1705 cfs_b
->throttled_time
+= rq
->clock
- cfs_rq
->throttled_timestamp
;
1706 list_del_rcu(&cfs_rq
->throttled_list
);
1707 raw_spin_unlock(&cfs_b
->lock
);
1708 cfs_rq
->throttled_timestamp
= 0;
1710 update_rq_clock(rq
);
1711 /* update hierarchical throttle state */
1712 walk_tg_tree_from(cfs_rq
->tg
, tg_nop
, tg_unthrottle_up
, (void *)rq
);
1714 if (!cfs_rq
->load
.weight
)
1717 task_delta
= cfs_rq
->h_nr_running
;
1718 for_each_sched_entity(se
) {
1722 cfs_rq
= cfs_rq_of(se
);
1724 enqueue_entity(cfs_rq
, se
, ENQUEUE_WAKEUP
);
1725 cfs_rq
->h_nr_running
+= task_delta
;
1727 if (cfs_rq_throttled(cfs_rq
))
1732 rq
->nr_running
+= task_delta
;
1734 /* determine whether we need to wake up potentially idle cpu */
1735 if (rq
->curr
== rq
->idle
&& rq
->cfs
.nr_running
)
1736 resched_task(rq
->curr
);
1739 static u64
distribute_cfs_runtime(struct cfs_bandwidth
*cfs_b
,
1740 u64 remaining
, u64 expires
)
1742 struct cfs_rq
*cfs_rq
;
1743 u64 runtime
= remaining
;
1746 list_for_each_entry_rcu(cfs_rq
, &cfs_b
->throttled_cfs_rq
,
1748 struct rq
*rq
= rq_of(cfs_rq
);
1750 raw_spin_lock(&rq
->lock
);
1751 if (!cfs_rq_throttled(cfs_rq
))
1754 runtime
= -cfs_rq
->runtime_remaining
+ 1;
1755 if (runtime
> remaining
)
1756 runtime
= remaining
;
1757 remaining
-= runtime
;
1759 cfs_rq
->runtime_remaining
+= runtime
;
1760 cfs_rq
->runtime_expires
= expires
;
1762 /* we check whether we're throttled above */
1763 if (cfs_rq
->runtime_remaining
> 0)
1764 unthrottle_cfs_rq(cfs_rq
);
1767 raw_spin_unlock(&rq
->lock
);
1778 * Responsible for refilling a task_group's bandwidth and unthrottling its
1779 * cfs_rqs as appropriate. If there has been no activity within the last
1780 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
1781 * used to track this state.
1783 static int do_sched_cfs_period_timer(struct cfs_bandwidth
*cfs_b
, int overrun
)
1785 u64 runtime
, runtime_expires
;
1786 int idle
= 1, throttled
;
1788 raw_spin_lock(&cfs_b
->lock
);
1789 /* no need to continue the timer with no bandwidth constraint */
1790 if (cfs_b
->quota
== RUNTIME_INF
)
1793 throttled
= !list_empty(&cfs_b
->throttled_cfs_rq
);
1794 /* idle depends on !throttled (for the case of a large deficit) */
1795 idle
= cfs_b
->idle
&& !throttled
;
1796 cfs_b
->nr_periods
+= overrun
;
1798 /* if we're going inactive then everything else can be deferred */
1802 __refill_cfs_bandwidth_runtime(cfs_b
);
1805 /* mark as potentially idle for the upcoming period */
1810 /* account preceding periods in which throttling occurred */
1811 cfs_b
->nr_throttled
+= overrun
;
1814 * There are throttled entities so we must first use the new bandwidth
1815 * to unthrottle them before making it generally available. This
	 * ensures that all existing debts will be paid before a new cfs_rq is
	 * allowed to run.
1819 runtime
= cfs_b
->runtime
;
1820 runtime_expires
= cfs_b
->runtime_expires
;
1824 * This check is repeated as we are holding onto the new bandwidth
1825 * while we unthrottle. This can potentially race with an unthrottled
1826 * group trying to acquire new bandwidth from the global pool.
1828 while (throttled
&& runtime
> 0) {
1829 raw_spin_unlock(&cfs_b
->lock
);
1830 /* we can't nest cfs_b->lock while distributing bandwidth */
1831 runtime
= distribute_cfs_runtime(cfs_b
, runtime
,
1833 raw_spin_lock(&cfs_b
->lock
);
1835 throttled
= !list_empty(&cfs_b
->throttled_cfs_rq
);
1838 /* return (any) remaining runtime */
1839 cfs_b
->runtime
= runtime
;
1841 * While we are ensured activity in the period following an
1842 * unthrottle, this also covers the case in which the new bandwidth is
1843 * insufficient to cover the existing bandwidth deficit. (Forcing the
1844 * timer to remain active while there are any throttled entities.)
1849 cfs_b
->timer_active
= 0;
1850 raw_spin_unlock(&cfs_b
->lock
);
1855 /* a cfs_rq won't donate quota below this amount */
1856 static const u64 min_cfs_rq_runtime
= 1 * NSEC_PER_MSEC
;
1857 /* minimum remaining period time to redistribute slack quota */
1858 static const u64 min_bandwidth_expiration
= 2 * NSEC_PER_MSEC
;
1859 /* how long we wait to gather additional slack before distributing */
1860 static const u64 cfs_bandwidth_slack_period
= 5 * NSEC_PER_MSEC
;
1862 /* are we near the end of the current quota period? */
1863 static int runtime_refresh_within(struct cfs_bandwidth
*cfs_b
, u64 min_expire
)
1865 struct hrtimer
*refresh_timer
= &cfs_b
->period_timer
;
1868 /* if the call-back is running a quota refresh is already occurring */
1869 if (hrtimer_callback_running(refresh_timer
))
1872 /* is a quota refresh about to occur? */
1873 remaining
= ktime_to_ns(hrtimer_expires_remaining(refresh_timer
));
1874 if (remaining
< min_expire
)
1880 static void start_cfs_slack_bandwidth(struct cfs_bandwidth
*cfs_b
)
1882 u64 min_left
= cfs_bandwidth_slack_period
+ min_bandwidth_expiration
;
1884 /* if there's a quota refresh soon don't bother with slack */
1885 if (runtime_refresh_within(cfs_b
, min_left
))
1888 start_bandwidth_timer(&cfs_b
->slack_timer
,
1889 ns_to_ktime(cfs_bandwidth_slack_period
));
1892 /* we know any runtime found here is valid as update_curr() precedes return */
1893 static void __return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1895 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1896 s64 slack_runtime
= cfs_rq
->runtime_remaining
- min_cfs_rq_runtime
;
1898 if (slack_runtime
<= 0)
1901 raw_spin_lock(&cfs_b
->lock
);
1902 if (cfs_b
->quota
!= RUNTIME_INF
&&
1903 cfs_rq
->runtime_expires
== cfs_b
->runtime_expires
) {
1904 cfs_b
->runtime
+= slack_runtime
;
1906 /* we are under rq->lock, defer unthrottling using a timer */
1907 if (cfs_b
->runtime
> sched_cfs_bandwidth_slice() &&
1908 !list_empty(&cfs_b
->throttled_cfs_rq
))
1909 start_cfs_slack_bandwidth(cfs_b
);
1911 raw_spin_unlock(&cfs_b
->lock
);
1913 /* even if it's not valid for return we don't want to try again */
1914 cfs_rq
->runtime_remaining
-= slack_runtime
;
1917 static __always_inline
void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1919 if (!cfs_bandwidth_used())
1922 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->nr_running
)
1925 __return_cfs_rq_runtime(cfs_rq
);
1929 * This is done with a timer (instead of inline with bandwidth return) since
1930 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
1932 static void do_sched_cfs_slack_timer(struct cfs_bandwidth
*cfs_b
)
1934 u64 runtime
= 0, slice
= sched_cfs_bandwidth_slice();
1937 /* confirm we're still not at a refresh boundary */
1938 if (runtime_refresh_within(cfs_b
, min_bandwidth_expiration
))
1941 raw_spin_lock(&cfs_b
->lock
);
1942 if (cfs_b
->quota
!= RUNTIME_INF
&& cfs_b
->runtime
> slice
) {
1943 runtime
= cfs_b
->runtime
;
1946 expires
= cfs_b
->runtime_expires
;
1947 raw_spin_unlock(&cfs_b
->lock
);
1952 runtime
= distribute_cfs_runtime(cfs_b
, runtime
, expires
);
1954 raw_spin_lock(&cfs_b
->lock
);
1955 if (expires
== cfs_b
->runtime_expires
)
1956 cfs_b
->runtime
= runtime
;
1957 raw_spin_unlock(&cfs_b
->lock
);
1961 * When a group wakes up we want to make sure that its quota is not already
1962 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime as update_curr() throttling cannot trigger until it's on-rq.
1965 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
)
1967 if (!cfs_bandwidth_used())
1970 /* an active group must be handled by the update_curr()->put() path */
1971 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->curr
)
1974 /* ensure the group is not already throttled */
1975 if (cfs_rq_throttled(cfs_rq
))
1978 /* update runtime allocation */
1979 account_cfs_rq_runtime(cfs_rq
, 0);
1980 if (cfs_rq
->runtime_remaining
<= 0)
1981 throttle_cfs_rq(cfs_rq
);
1984 /* conditionally throttle active cfs_rq's from put_prev_entity() */
1985 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1987 if (!cfs_bandwidth_used())
1990 if (likely(!cfs_rq
->runtime_enabled
|| cfs_rq
->runtime_remaining
> 0))
1994 * it's possible for a throttled entity to be forced into a running
1995 * state (e.g. set_curr_task), in this case we're finished.
1997 if (cfs_rq_throttled(cfs_rq
))
2000 throttle_cfs_rq(cfs_rq
);
2003 static inline u64
default_cfs_period(void);
2004 static int do_sched_cfs_period_timer(struct cfs_bandwidth
*cfs_b
, int overrun
);
2005 static void do_sched_cfs_slack_timer(struct cfs_bandwidth
*cfs_b
);
2007 static enum hrtimer_restart
sched_cfs_slack_timer(struct hrtimer
*timer
)
2009 struct cfs_bandwidth
*cfs_b
=
2010 container_of(timer
, struct cfs_bandwidth
, slack_timer
);
2011 do_sched_cfs_slack_timer(cfs_b
);
2013 return HRTIMER_NORESTART
;
2016 static enum hrtimer_restart
sched_cfs_period_timer(struct hrtimer
*timer
)
2018 struct cfs_bandwidth
*cfs_b
=
2019 container_of(timer
, struct cfs_bandwidth
, period_timer
);
2025 now
= hrtimer_cb_get_time(timer
);
2026 overrun
= hrtimer_forward(timer
, now
, cfs_b
->period
);
2031 idle
= do_sched_cfs_period_timer(cfs_b
, overrun
);
2034 return idle
? HRTIMER_NORESTART
: HRTIMER_RESTART
;
2037 void init_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
2039 raw_spin_lock_init(&cfs_b
->lock
);
2041 cfs_b
->quota
= RUNTIME_INF
;
2042 cfs_b
->period
= ns_to_ktime(default_cfs_period());
2044 INIT_LIST_HEAD(&cfs_b
->throttled_cfs_rq
);
2045 hrtimer_init(&cfs_b
->period_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
2046 cfs_b
->period_timer
.function
= sched_cfs_period_timer
;
2047 hrtimer_init(&cfs_b
->slack_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
2048 cfs_b
->slack_timer
.function
= sched_cfs_slack_timer
;
2051 static void init_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
2053 cfs_rq
->runtime_enabled
= 0;
2054 INIT_LIST_HEAD(&cfs_rq
->throttled_list
);
2057 /* requires cfs_b->lock, may release to reprogram timer */
2058 void __start_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
2061 * The timer may be active because we're trying to set a new bandwidth
2062 * period or because we're racing with the tear-down path
2063 * (timer_active==0 becomes visible before the hrtimer call-back
2064 * terminates). In either case we ensure that it's re-programmed
2066 while (unlikely(hrtimer_active(&cfs_b
->period_timer
))) {
2067 raw_spin_unlock(&cfs_b
->lock
);
2068 /* ensure cfs_b->lock is available while we wait */
2069 hrtimer_cancel(&cfs_b
->period_timer
);
2071 raw_spin_lock(&cfs_b
->lock
);
2072 /* if someone else restarted the timer then we're done */
2073 if (cfs_b
->timer_active
)
2077 cfs_b
->timer_active
= 1;
2078 start_bandwidth_timer(&cfs_b
->period_timer
, cfs_b
->period
);
2081 static void destroy_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
2083 hrtimer_cancel(&cfs_b
->period_timer
);
2084 hrtimer_cancel(&cfs_b
->slack_timer
);
2087 void unthrottle_offline_cfs_rqs(struct rq
*rq
)
2089 struct cfs_rq
*cfs_rq
;
2091 for_each_leaf_cfs_rq(rq
, cfs_rq
) {
2092 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
2094 if (!cfs_rq
->runtime_enabled
)
2098 * clock_task is not advancing so we just need to make sure
2099 * there's some valid quota amount
2101 cfs_rq
->runtime_remaining
= cfs_b
->quota
;
2102 if (cfs_rq_throttled(cfs_rq
))
2103 unthrottle_cfs_rq(cfs_rq
);
2107 #else /* CONFIG_CFS_BANDWIDTH */
2108 static void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
2109 unsigned long delta_exec
) {}
2110 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
2111 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
) {}
2112 static void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
2114 static inline int cfs_rq_throttled(struct cfs_rq
*cfs_rq
)
2119 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
)
2124 static inline int throttled_lb_pair(struct task_group
*tg
,
2125 int src_cpu
, int dest_cpu
)
2130 void init_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
) {}
2132 #ifdef CONFIG_FAIR_GROUP_SCHED
2133 static void init_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
2136 static inline struct cfs_bandwidth
*tg_cfs_bandwidth(struct task_group
*tg
)
2140 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
) {}
2141 void unthrottle_offline_cfs_rqs(struct rq
*rq
) {}
2143 #endif /* CONFIG_CFS_BANDWIDTH */
2145 /**************************************************
2146 * CFS operations on tasks:
2149 #ifdef CONFIG_SCHED_HRTICK
2150 static void hrtick_start_fair(struct rq
*rq
, struct task_struct
*p
)
2152 struct sched_entity
*se
= &p
->se
;
2153 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
2155 WARN_ON(task_rq(p
) != rq
);
2157 if (cfs_rq
->nr_running
> 1) {
2158 u64 slice
= sched_slice(cfs_rq
, se
);
2159 u64 ran
= se
->sum_exec_runtime
- se
->prev_sum_exec_runtime
;
2160 s64 delta
= slice
- ran
;
2169 * Don't schedule slices shorter than 10000ns, that just
2170 * doesn't make sense. Rely on vruntime for fairness.
2173 delta
= max_t(s64
, 10000LL, delta
);
2175 hrtick_start(rq
, delta
);
2180 * called from enqueue/dequeue and updates the hrtick when the
2181 * current task is from our class and nr_running is low enough
2184 static void hrtick_update(struct rq
*rq
)
2186 struct task_struct
*curr
= rq
->curr
;
2188 if (!hrtick_enabled(rq
) || curr
->sched_class
!= &fair_sched_class
)
2191 if (cfs_rq_of(&curr
->se
)->nr_running
< sched_nr_latency
)
2192 hrtick_start_fair(rq
, curr
);
2194 #else /* !CONFIG_SCHED_HRTICK */
2196 hrtick_start_fair(struct rq
*rq
, struct task_struct
*p
)
2200 static inline void hrtick_update(struct rq
*rq
)
2206 * The enqueue_task method is called before nr_running is
2207 * increased. Here we update the fair scheduling stats and
2208 * then put the task into the rbtree:
2211 enqueue_task_fair(struct rq
*rq
, struct task_struct
*p
, int flags
)
2213 struct cfs_rq
*cfs_rq
;
2214 struct sched_entity
*se
= &p
->se
;
2216 for_each_sched_entity(se
) {
2219 cfs_rq
= cfs_rq_of(se
);
2220 enqueue_entity(cfs_rq
, se
, flags
);
2223 * end evaluation on encountering a throttled cfs_rq
2225 * note: in the case of encountering a throttled cfs_rq we will
2226 * post the final h_nr_running increment below.
2228 if (cfs_rq_throttled(cfs_rq
))
2230 cfs_rq
->h_nr_running
++;
2232 flags
= ENQUEUE_WAKEUP
;
2235 for_each_sched_entity(se
) {
2236 cfs_rq
= cfs_rq_of(se
);
2237 cfs_rq
->h_nr_running
++;
2239 if (cfs_rq_throttled(cfs_rq
))
2242 update_cfs_load(cfs_rq
, 0);
2243 update_cfs_shares(cfs_rq
);
2251 static void set_next_buddy(struct sched_entity
*se
);
/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, flags);

		/*
		 * end evaluation on encountering a throttled cfs_rq
		 *
		 * note: in the case of encountering a throttled cfs_rq we will
		 * post the final h_nr_running decrement below.
		 */
		if (cfs_rq_throttled(cfs_rq))
			break;
		cfs_rq->h_nr_running--;

		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			/*
			 * Bias pick_next to pick a task from this cfs_rq, as
			 * p is sleeping when it is within its sched_slice.
			 */
			if (task_sleep && parent_entity(se))
				set_next_buddy(parent_entity(se));

			/* avoid re-evaluating load for this entity */
			se = parent_entity(se);
			break;
		}
		flags |= DEQUEUE_SLEEP;
	}

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		cfs_rq->h_nr_running--;

		if (cfs_rq_throttled(cfs_rq))
			break;

		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}

	if (!se)
		dec_nr_running(rq);
	hrtick_update(rq);
}
#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->load.weight;
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return max(rq->cpu_load[type-1], total);
}

static unsigned long power_of(int cpu)
{
	return cpu_rq(cpu)->cpu_power;
}

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

	if (nr_running)
		return rq->load.weight / nr_running;

	return 0;
}
static void task_waking_fair(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 min_vruntime;

#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;

	do {
		min_vruntime_copy = cfs_rq->min_vruntime_copy;
		smp_rmb();
		min_vruntime = cfs_rq->min_vruntime;
	} while (min_vruntime != min_vruntime_copy);
#else
	min_vruntime = cfs_rq->min_vruntime;
#endif

	se->vruntime -= min_vruntime;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * Calculate the effective load difference if @wl is added (subtracted) to @tg
 * on this @cpu and results in a total addition (subtraction) of @wg to the
 * total group weight.
 *
 * Given a runqueue weight distribution (rw_i) we can compute a shares
 * distribution (s_i) using:
 *
 *   s_i = rw_i / \Sum rw_j						(1)
 *
 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
 * shares distribution (s_i):
 *
 *   rw_i = {   2,   4,   1,   0 }
 *   s_i  = { 2/7, 4/7, 1/7,   0 }
 *
 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
 * task used to run on and the CPU the waker is running on); we need to
 * compute the effect of waking a task on either CPU and, in case of a sync
 * wakeup, compute the effect of the current task going to sleep.
 *
 * So for a change of @wl to the local @cpu with an overall group weight change
 * of @wl we can compute the new shares distribution (s'_i) using:
 *
 *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
 *
 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
 * differences in waking a task to CPU 0. The additional task changes the
 * weight and shares distributions like:
 *
 *   rw'_i = {   3,   4,   1,   0 }
 *   s'_i  = { 3/8, 4/8, 1/8,   0 }
 *
 * We can then compute the difference in effective weight by using:
 *
 *   dw_i = S * (s'_i - s_i)						(3)
 *
 * Where 'S' is the group weight as seen by its parent.
 *
 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
 * 4/7) times the weight of the group.
 */
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)	/* the trivial, non-cgroup case */
		return wl;

	for_each_sched_entity(se) {
		long w, W;

		tg = se->my_q->tg;

		/*
		 * W = @wg + \Sum rw_j
		 */
		W = wg + calc_tg_weight(tg, se->my_q);

		/*
		 * w = rw_i + @wl
		 */
		w = se->my_q->load.weight + wl;

		/*
		 * wl = S * s'_i; see (2)
		 */
		wl = (w * tg->shares) / W;

		/*
		 * Per the above, wl is the new se->load.weight value; since
		 * those are clipped to [MIN_SHARES, ...) do so now. See
		 * calc_cfs_shares().
		 */
		if (wl < MIN_SHARES)
			wl = MIN_SHARES;

		/*
		 * wl = dw_i = S * (s'_i - s_i); see (3)
		 */
		wl -= se->load.weight;

		/*
		 * Recursively apply this logic to all parent groups to compute
		 * the final effective load change on the root group. Since
		 * only the @tg group gets extra weight, all parent groups can
		 * only redistribute existing shares. @wl is the shift in shares
		 * resulting from this level per the above.
		 */
		wg = 0;
	}

	return wl;
}
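/*
 * Illustrative sketch (not part of the original scheduler source): the worked
 * example from the comment above, rw_i = {2,4,1,0} with one extra task woken
 * on CPU 0, evaluated with plain integer math.  The fixed-point scale below
 * is hypothetical and only serves to show how s'_i - s_i maps onto dw_i.
 */
#if 0	/* example only, never compiled */
static long example_effective_load_cpu0(long S)
{
	/* \Sum rw_j = 7; the new task adds weight 1 on CPU 0 */
	long scale = 1 << 20;			/* hypothetical fixed point */
	long s_old = (2 * scale) / 7;		/* s_0  = 2/7, see (1) */
	long s_new = (3 * scale) / 8;		/* s'_0 = 3/8, see (2) */

	/* dw_0 = S * (s'_0 - s_0) = S * 5/56, see (3) */
	return (S * (s_new - s_old)) / scale;
}
#endif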
#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	s64 this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx	  = sd->wake_idx;
	this_cpu  = smp_processor_id();
	prev_cpu  = task_cpu(p);
	load	  = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if either cpus are near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
	if (this_load > 0) {
		s64 this_eff_load, prev_eff_load;

		this_eff_load = 100;
		this_eff_load *= power_of(prev_cpu);
		this_eff_load *= this_load +
			effective_load(tg, this_cpu, weight, weight);

		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
		prev_eff_load *= power_of(this_cpu);
		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);

		balanced = this_eff_load <= prev_eff_load;
	} else
		balanced = 1;

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.statistics.nr_wakeups_affine);

		return 1;
	}
	return 0;
}
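/*
 * Illustrative sketch (not part of the original scheduler source): the core
 * of the balance check above is a cross-multiplied comparison so no division
 * is needed, with the imbalance_pct handicap (e.g. 125 -> +12%) applied to
 * the previous CPU's side.  All numbers below are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_wake_affine_balanced(void)
{
	unsigned long this_load = 2048, prev_load = 1024;	/* made up */
	unsigned long this_power = 1024, prev_power = 1024;	/* ~SCHED_POWER_SCALE */
	unsigned long imbalance_pct = 125;			/* typical sd value */

	u64 this_eff = (u64)100 * prev_power * this_load;
	u64 prev_eff = (u64)(100 + (imbalance_pct - 100) / 2) *
			this_power * prev_load;

	/* pulling the wakee to this_cpu is considered balanced when this holds */
	return this_eff <= prev_eff;
}
#endif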
/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					tsk_cpus_allowed(p)))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;

		if (local_group) {
			this_load = avg_load;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}
/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}
/*
 * Try and locate an idle CPU in the sched_domain.
 */
static int select_idle_sibling(struct task_struct *p, int target)
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	struct sched_domain *sd;
	struct sched_group *sg;
	int i;

	/*
	 * If the task is going to be woken-up on this cpu and if it is
	 * already idle, then it is the right target.
	 */
	if (target == cpu && idle_cpu(cpu))
		return cpu;

	/*
	 * If the task is going to be woken-up on the cpu where it previously
	 * ran and if it is currently idle, then it is the right target.
	 */
	if (target == prev_cpu && idle_cpu(prev_cpu))
		return prev_cpu;

	/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
	 */
	rcu_read_lock();

	sd = rcu_dereference(per_cpu(sd_llc, target));
	for_each_lower_domain(sd) {
		sg = sd->groups;
		do {
			if (!cpumask_intersects(sched_group_cpus(sg),
						tsk_cpus_allowed(p)))
				goto next;

			for_each_cpu(i, sched_group_cpus(sg)) {
				if (!idle_cpu(i))
					goto next;
			}

			target = cpumask_first_and(sched_group_cpus(sg),
					tsk_cpus_allowed(p));
			goto done;
next:
			sg = sg->next;
		} while (sg != sd->groups);
	}
done:
	rcu_read_unlock();

	return target;
}
/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int
select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (p->rt.nr_cpus_allowed == 1)
		return prev_cpu;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, tmp) {
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		/*
		 * If both cpu and prev_cpu are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target.
		 */
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
			affine_sd = tmp;
			want_affine = 0;
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

	if (affine_sd) {
		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
			prev_cpu = cpu;

		new_cpu = select_idle_sibling(p, prev_cpu);
		goto unlock;
	}

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = sd->span_weight;
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= tmp->span_weight)
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}
unlock:
	rcu_read_unlock();

	return new_cpu;
}
#endif /* CONFIG_SMP */
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since it's curr running now, convert the gran from real-time
	 * to virtual-time in its units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted easier. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter; if otoh 'se' > 'curr' then the resulting gran will
	 * be smaller, again penalizing the lighter task.
	 *
	 * This is especially important for buddies when the leftmost
	 * task is higher priority than the buddy.
	 */
	return calc_delta_fair(gran, se);
}
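/*
 * Illustrative sketch (not part of the original scheduler source): converting
 * the wakeup granularity to virtual time just scales it by NICE_0_LOAD over
 * the entity's weight, which is what calc_delta_fair() amounts to for a
 * non-nice-0 task.  The numbers below are hypothetical.
 */
#if 0	/* example only, never compiled */
static unsigned long example_virtual_gran(void)
{
	unsigned long gran = 1000000UL;		/* 1ms wakeup granularity */
	unsigned long nice0_weight = 1024;	/* NICE_0_LOAD */
	unsigned long se_weight = 335;		/* roughly a nice +5 task */

	/* a lighter 'se' gets a larger vruntime gran, so it preempts less easily */
	return gran * nice0_weight / se_weight;
}
#endif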
/*
 * Should 'se' preempt 'curr'.
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}
static void set_last_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->last = se;
}

static void set_next_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->next = se;
}

static void set_skip_buddy(struct sched_entity *se)
{
	for_each_sched_entity(se)
		cfs_rq_of(se)->skip = se;
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int scale = cfs_rq->nr_running >= sched_nr_latency;
	int next_buddy_marked = 0;

	if (unlikely(se == pse))
		return;

	/*
	 * This is possible from callers such as pull_task(), in which we
	 * unconditionally check_preempt_curr() after an enqueue (which may have
	 * lead to a throttle).  This both saves work and prevents false
	 * next-buddy nomination below.
	 */
	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
		set_next_buddy(pse);
		next_buddy_marked = 1;
	}

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 *
	 * Note: this also catches the edge-case of curr being in a throttled
	 * group (e.g. via set_curr_task), since update_curr() (in the
	 * enqueue of curr) will have resulted in resched being set.  This
	 * prevents us from potentially nominating it as a false LAST_BUDDY
	 * below.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/* Idle tasks are by definition preempted by non-idle tasks. */
	if (unlikely(curr->policy == SCHED_IDLE) &&
	    likely(p->policy != SCHED_IDLE))
		goto preempt;

	/*
	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
	 * is driven by the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	find_matching_se(&se, &pse);
	update_curr(cfs_rq_of(se));
	if (wakeup_preempt_entity(se, pse) == 1) {
		/*
		 * Bias pick_next to pick the sched entity that is
		 * triggering this preemption.
		 */
		if (!next_buddy_marked)
			set_next_buddy(pse);
		goto preempt;
	}

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class;
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	if (hrtick_enabled(rq))
		hrtick_start_fair(rq, p);

	return p;
}
/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}
/*
 * sched_yield() is very simple
 *
 * The magic of dealing with the ->skip buddy is in pick_next_entity.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);
		/*
		 * Tell update_rq_clock() that we've just updated,
		 * so we don't do microscopic update in schedule()
		 * and double the fastpath cost.
		 */
		rq->skip_clock_update = 1;
	}

	set_skip_buddy(se);
}
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
{
	struct sched_entity *se = &p->se;

	/* throttled hierarchies are not runnable */
	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
		return false;

	/* Tell the scheduler that we'd really like pse to run next. */
	set_next_buddy(se);

	yield_task_fair(rq);

	return true;
}
/**************************************************
 * Fair scheduling class load-balancing methods:
 */
/*
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
static void pull_task(struct rq *src_rq, struct task_struct *p,
		      struct rq *this_rq, int this_cpu)
{
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, this_cpu);
	activate_task(this_rq, p, 0);
	check_preempt_curr(this_rq, p, 0);
}
/*
 * Is this task likely cache-hot:
 */
static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
	s64 delta;

	if (p->sched_class != &fair_sched_class)
		return 0;

	if (unlikely(p->policy == SCHED_IDLE))
		return 0;

	/*
	 * Buddy candidates are cache hot:
	 */
	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
			(&p->se == cfs_rq_of(&p->se)->next ||
			 &p->se == cfs_rq_of(&p->se)->last))
		return 1;

	if (sysctl_sched_migration_cost == -1)
		return 1;
	if (sysctl_sched_migration_cost == 0)
		return 0;

	delta = now - p->se.exec_start;

	return delta < (s64)sysctl_sched_migration_cost;
}
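/*
 * Illustrative sketch (not part of the original scheduler source): apart from
 * the buddy and sysctl special cases, cache hotness above boils down to
 * "the task last ran less than sysctl_sched_migration_cost ago".  The value
 * used below is the usual default; treat it as a hypothetical constant.
 */
#if 0	/* example only, never compiled */
static int example_task_hot(u64 now, u64 exec_start)
{
	s64 migration_cost = 500000LL;		/* 0.5ms, in nanoseconds */
	s64 delta = (s64)(now - exec_start);

	return delta < migration_cost;
}
#endif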
#define LBF_ALL_PINNED	0x01
#define LBF_NEED_BREAK	0x02	/* clears into HAD_BREAK */
#define LBF_HAD_BREAK	0x04
#define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs; overflows into ABORT */
#define LBF_ABORT	0x10
/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
		     struct sched_domain *sd, enum cpu_idle_type idle,
		     int *lb_flags)
{
	int tsk_cache_hot = 0;
	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
	if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
		return 0;
	}
	*lb_flags &= ~LBF_ALL_PINNED;

	if (task_running(rq, p)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) task is cache cold, or
	 * 2) too many balance attempts have failed.
	 */
	tsk_cache_hot = task_hot(p, rq->clock_task, sd);
	if (!tsk_cache_hot ||
		sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
			schedstat_inc(sd, lb_hot_gained[idle]);
			schedstat_inc(p, se.statistics.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (tsk_cache_hot) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
		return 0;
	}
	return 1;
}
/*
 * move_one_task tries to move exactly one task from busiest to this_rq, as
 * part of active balancing operations within "domain".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int
move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct task_struct *p, *n;
	struct cfs_rq *cfs_rq;
	int lb_flags = 0;

	for_each_leaf_cfs_rq(busiest, cfs_rq) {
		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
			if (throttled_lb_pair(task_group(p),
					      busiest->cpu, this_cpu))
				break;

			if (!can_migrate_task(p, busiest, this_cpu,
						sd, idle, &lb_flags))
				continue;

			pull_task(busiest, p, this_rq, this_cpu);
			/*
			 * Right now, this is only the second place pull_task()
			 * is called, so we can safely collect pull_task()
			 * stats here rather than inside pull_task().
			 */
			schedstat_inc(sd, lb_gained[idle]);
			return 1;
		}
	}

	return 0;
}
static unsigned long
balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      unsigned long max_load_move, struct sched_domain *sd,
	      enum cpu_idle_type idle, int *lb_flags,
	      struct cfs_rq *busiest_cfs_rq)
{
	int loops = 0, pulled = 0;
	long rem_load_move = max_load_move;
	struct task_struct *p, *n;

	if (max_load_move == 0)
		goto out;

	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
		if (loops++ > sysctl_sched_nr_migrate) {
			*lb_flags |= LBF_NEED_BREAK;
			break;
		}

		if ((p->se.load.weight >> 1) > rem_load_move ||
		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
				      lb_flags))
			continue;

		pull_task(busiest, p, this_rq, this_cpu);
		pulled++;
		rem_load_move -= p->se.load.weight;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to minimize
		 * the critical section.
		 */
		if (idle == CPU_NEWLY_IDLE) {
			*lb_flags |= LBF_ABORT;
			break;
		}
#endif

		/*
		 * We only want to steal up to the prescribed amount of
		 * weighted load.
		 */
		if (rem_load_move <= 0)
			break;
	}
out:
	/*
	 * Right now, this is one of only two places pull_task() is called,
	 * so we can safely collect pull_task() stats here rather than
	 * inside pull_task().
	 */
	schedstat_add(sd, lb_gained[idle], pulled);

	return max_load_move - rem_load_move;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * update tg->load_weight by folding this cpu's load_avg
 */
static int update_shares_cpu(struct task_group *tg, int cpu)
{
	struct cfs_rq *cfs_rq;
	unsigned long flags;
	struct rq *rq;

	if (!tg->se[cpu])
		return 0;

	rq = cpu_rq(cpu);
	cfs_rq = tg->cfs_rq[cpu];

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);
	update_cfs_load(cfs_rq, 1);

	/*
	 * We need to update shares after updating tg->load_weight in
	 * order to adjust the weight of groups with long running tasks.
	 */
	update_cfs_shares(cfs_rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return 0;
}

static void update_shares(int cpu)
{
	struct cfs_rq *cfs_rq;
	struct rq *rq = cpu_rq(cpu);

	rcu_read_lock();
	/*
	 * Iterates the task_group tree in a bottom up fashion, see
	 * list_add_leaf_cfs_rq() for details.
	 */
	for_each_leaf_cfs_rq(rq, cfs_rq) {
		/* throttled entities do not contribute to load */
		if (throttled_hierarchy(cfs_rq))
			continue;

		update_shares_cpu(cfs_rq->tg, cpu);
	}
	rcu_read_unlock();
}
/*
 * Compute the cpu's hierarchical load factor for each task group.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
static int tg_load_down(struct task_group *tg, void *data)
{
	unsigned long load;
	long cpu = (long)data;

	if (!tg->parent) {
		load = cpu_rq(cpu)->load.weight;
	} else {
		load = tg->parent->cfs_rq[cpu]->h_load;
		load *= tg->se[cpu]->load.weight;
		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
	}

	tg->cfs_rq[cpu]->h_load = load;

	return 0;
}

static void update_h_load(long cpu)
{
	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}
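/*
 * Illustrative sketch (not part of the original scheduler source): h_load is
 * the parent's h_load scaled by this level's share of the parent runqueue.
 * With hypothetical numbers, a group entity of weight 512 on a parent cfs_rq
 * of weight 2048 inherits roughly a quarter of the parent's h_load:
 */
#if 0	/* example only, never compiled */
static unsigned long example_h_load(void)
{
	unsigned long parent_h_load = 3072;	/* made-up upper-level load */
	unsigned long se_weight = 512;		/* group entity weight */
	unsigned long parent_cfs_weight = 2048;	/* parent cfs_rq load.weight */

	/* same shape as tg_load_down(); the +1 avoids a division by zero */
	return parent_h_load * se_weight / (parent_cfs_weight + 1);
}
#endif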
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *lb_flags)
{
	long rem_load_move = max_load_move;
	struct cfs_rq *busiest_cfs_rq;

	rcu_read_lock();
	update_h_load(cpu_of(busiest));

	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
			break;

		/*
		 * empty group or part of a throttled hierarchy
		 */
		if (!busiest_cfs_rq->task_weight ||
		    throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu))
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = balance_tasks(this_rq, this_cpu, busiest,
				rem_load, sd, idle, lb_flags,
				busiest_cfs_rq);

		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
#else
static inline void update_shares(int cpu)
{
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *lb_flags)
{
	return balance_tasks(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, lb_flags,
			&busiest->cfs);
}
#endif
/*
 * move_tasks tries to move up to max_load_move weighted load from busiest to
 * this_rq, as part of a balancing operation within domain "sd".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
		      unsigned long max_load_move,
		      struct sched_domain *sd, enum cpu_idle_type idle,
		      int *lb_flags)
{
	unsigned long total_load_moved = 0, load_moved;

	do {
		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
				max_load_move - total_load_moved,
				sd, idle, lb_flags);

		total_load_moved += load_moved;

		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
			break;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to minimize
		 * the critical section.
		 */
		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) {
			*lb_flags |= LBF_ABORT;
			break;
		}
#endif
	} while (load_moved && max_load_move > total_load_moved);

	return total_load_moved > 0;
}
/********** Helpers for find_busiest_group ************************/
/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 *		during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest; /* Busiest group in this sd */
	struct sched_group *this;  /* Local group in this sd */
	unsigned long total_load;  /* Total load of all groups in sd */
	unsigned long total_pwr;   /* Total power of all groups in sd */
	unsigned long avg_load;	   /* Average load across all groups in sd */

	/** Statistics of this group */
	unsigned long this_load;
	unsigned long this_load_per_task;
	unsigned long this_nr_running;
	unsigned long this_has_capacity;
	unsigned int  this_idle_cpus;

	/* Statistics of the busiest group */
	unsigned int  busiest_idle_cpus;
	unsigned long max_load;
	unsigned long busiest_load_per_task;
	unsigned long busiest_nr_running;
	unsigned long busiest_group_capacity;
	unsigned long busiest_has_capacity;
	unsigned int  busiest_group_weight;

	int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
	int power_savings_balance; /* Is powersave balance needed for this sd */
	struct sched_group *group_min; /* Least loaded group in sd */
	struct sched_group *group_leader; /* Group which relieves group_min */
	unsigned long min_load_per_task; /* load_per_task in group_min */
	unsigned long leader_nr_running; /* Nr running of group_leader */
	unsigned long min_nr_running; /* Nr running of group_min */
#endif
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /* Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long sum_nr_running; /* Nr tasks running in the group */
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long group_capacity;
	unsigned long idle_cpus;
	unsigned long group_weight;
	int group_imb; /* Is there an imbalance in the group ? */
	int group_has_capacity; /* Is there extra capacity in the group? */
};
/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU for whose sd the load_idx is obtained.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
					enum cpu_idle_type idle)
{
	int load_idx;

	switch (idle) {
	case CPU_NOT_IDLE:
		load_idx = sd->busy_idx;
		break;

	case CPU_NEWLY_IDLE:
		load_idx = sd->newidle_idx;
		break;
	default:
		load_idx = sd->idle_idx;
		break;
	}

	return load_idx;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
 * init_sd_power_savings_stats - Initialize power savings statistics for
 * the given sched_domain, during load balancing.
 *
 * @sd: Sched domain whose power-savings statistics are to be initialized.
 * @sds: Variable containing the statistics for sd.
 * @idle: Idle status of the CPU at which we're performing load-balancing.
 */
static inline void init_sd_power_savings_stats(struct sched_domain *sd,
	struct sd_lb_stats *sds, enum cpu_idle_type idle)
{
	/*
	 * Busy processors will not participate in power savings
	 * balance.
	 */
	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
		sds->power_savings_balance = 0;
	else {
		sds->power_savings_balance = 1;
		sds->min_nr_running = ULONG_MAX;
		sds->leader_nr_running = 0;
	}
}
/**
 * update_sd_power_savings_stats - Update the power saving stats for a
 * sched_domain while performing load balancing.
 *
 * @group: sched_group belonging to the sched_domain under consideration.
 * @sds: Variable containing the statistics of the sched_domain
 * @local_group: Does group contain the CPU for which we're performing
 *		load balancing?
 * @sgs: Variable containing the statistics of the group.
 */
static inline void update_sd_power_savings_stats(struct sched_group *group,
	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
{

	if (!sds->power_savings_balance)
		return;

	/*
	 * If the local group is idle or completely loaded
	 * no need to do power savings balance at this domain
	 */
	if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
				!sds->this_nr_running))
		sds->power_savings_balance = 0;

	/*
	 * If a group is already running at full capacity or idle,
	 * don't include that group in power savings calculations
	 */
	if (!sds->power_savings_balance ||
		sgs->sum_nr_running >= sgs->group_capacity ||
		!sgs->sum_nr_running)
		return;

	/*
	 * Calculate the group which has the least non-idle load.
	 * This is the group from where we need to pick up the load
	 * for saving power
	 */
	if ((sgs->sum_nr_running < sds->min_nr_running) ||
	    (sgs->sum_nr_running == sds->min_nr_running &&
	     group_first_cpu(group) > group_first_cpu(sds->group_min))) {
		sds->group_min = group;
		sds->min_nr_running = sgs->sum_nr_running;
		sds->min_load_per_task = sgs->sum_weighted_load /
						sgs->sum_nr_running;
	}

	/*
	 * Calculate the group which is almost near its
	 * capacity but still has some space to pick up some load
	 * from other group and save more power
	 */
	if (sgs->sum_nr_running + 1 > sgs->group_capacity)
		return;

	if (sgs->sum_nr_running > sds->leader_nr_running ||
	    (sgs->sum_nr_running == sds->leader_nr_running &&
	     group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
		sds->group_leader = group;
		sds->leader_nr_running = sgs->sum_nr_running;
	}
}
/**
 * check_power_save_busiest_group - see if there is potential for some power-savings balance
 * @sds: Variable containing the statistics of the sched_domain
 *	under consideration.
 * @this_cpu: Cpu at which we're currently performing load-balancing.
 * @imbalance: Variable to store the imbalance.
 *
 * Description:
 * Check if we have potential to perform some power-savings balance.
 * If yes, set the busiest group to be the least loaded group in the
 * sched_domain, so that its CPUs can be put to idle.
 *
 * Returns 1 if there is potential to perform power-savings balance.
 * Else returns 0.
 */
static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
					int this_cpu, unsigned long *imbalance)
{
	if (!sds->power_savings_balance)
		return 0;

	if (sds->this != sds->group_leader ||
			sds->group_leader == sds->group_min)
		return 0;

	*imbalance = sds->min_load_per_task;
	sds->busiest = sds->group_min;

	return 1;
}
#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
static inline void init_sd_power_savings_stats(struct sched_domain *sd,
	struct sd_lb_stats *sds, enum cpu_idle_type idle)
{
	return;
}

static inline void update_sd_power_savings_stats(struct sched_group *group,
	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
{
	return;
}

static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
					int this_cpu, unsigned long *imbalance)
{
	return 0;
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return SCHED_POWER_SCALE;
}

unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return default_scale_freq_power(sd, cpu);
}

unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
	unsigned long weight = sd->span_weight;
	unsigned long smt_gain = sd->smt_gain;

	smt_gain /= weight;

	return smt_gain;
}

unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
{
	return default_scale_smt_power(sd, cpu);
}
unsigned long scale_rt_power(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	u64 total, available;

	total = sched_avg_period() + (rq->clock - rq->age_stamp);

	if (unlikely(total < rq->rt_avg)) {
		/* Ensures that power won't end up being negative */
		available = 0;
	} else {
		available = total - rq->rt_avg;
	}

	if (unlikely((s64)total < SCHED_POWER_SCALE))
		total = SCHED_POWER_SCALE;

	total >>= SCHED_POWER_SHIFT;

	return div_u64(available, total);
}
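/*
 * Illustrative sketch (not part of the original scheduler source): the value
 * returned above is roughly "the fraction of the averaging period not eaten
 * by RT/IRQ time", expressed on the SCHED_POWER_SCALE scale.  The numbers
 * and the explicit 1024 factor below are hypothetical.
 */
#if 0	/* example only, never compiled */
static u64 example_scale_rt_power(void)
{
	u64 total = 1000000ULL;		/* averaging window, ns (made up) */
	u64 rt_avg = 250000ULL;		/* time spent in RT/IRQ, ns */
	u64 available = total - rt_avg;

	/* ~75% of the scale remains for fair tasks */
	return div_u64(available * 1024, total);
}
#endif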
static void update_cpu_power(struct sched_domain *sd, int cpu)
{
	unsigned long weight = sd->span_weight;
	unsigned long power = SCHED_POWER_SCALE;
	struct sched_group *sdg = sd->groups;

	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
		if (sched_feat(ARCH_POWER))
			power *= arch_scale_smt_power(sd, cpu);
		else
			power *= default_scale_smt_power(sd, cpu);

		power >>= SCHED_POWER_SHIFT;
	}

	sdg->sgp->power_orig = power;

	if (sched_feat(ARCH_POWER))
		power *= arch_scale_freq_power(sd, cpu);
	else
		power *= default_scale_freq_power(sd, cpu);

	power >>= SCHED_POWER_SHIFT;

	power *= scale_rt_power(cpu);
	power >>= SCHED_POWER_SHIFT;

	if (!power)
		power = 1;

	cpu_rq(cpu)->cpu_power = power;
	sdg->sgp->power = power;
}
void update_group_power(struct sched_domain *sd, int cpu)
{
	struct sched_domain *child = sd->child;
	struct sched_group *group, *sdg = sd->groups;
	unsigned long power;

	if (!child) {
		update_cpu_power(sd, cpu);
		return;
	}

	power = 0;

	group = child->groups;
	do {
		power += group->sgp->power;
		group = group->next;
	} while (group != child->groups);

	sdg->sgp->power = power;
}
/*
 * Try and fix up capacity for tiny siblings; this is needed when
 * things like SD_ASYM_PACKING need f_b_g to select another sibling
 * which on its own isn't powerful enough.
 *
 * See update_sd_pick_busiest() and check_asym_packing().
 */
static inline int
fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
{
	/*
	 * Only siblings can have significantly less than SCHED_POWER_SCALE
	 */
	if (!(sd->flags & SD_SHARE_CPUPOWER))
		return 0;

	/*
	 * If ~90% of the cpu_power is still there, we're good.
	 */
	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
		return 1;

	return 0;
}
/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @sd: The sched_domain whose statistics are to be updated.
 * @group: sched_group whose statistics are to be updated.
 * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @cpus: Set of cpus considered for load balancing.
 * @balance: Should we balance.
 * @sgs: variable to hold the statistics for this group.
 */
static inline void update_sg_lb_stats(struct sched_domain *sd,
			struct sched_group *group, int this_cpu,
			enum cpu_idle_type idle, int load_idx,
			int local_group, const struct cpumask *cpus,
			int *balance, struct sg_lb_stats *sgs)
{
	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
	int i;
	unsigned int balance_cpu = -1, first_idle_cpu = 0;
	unsigned long avg_load_per_task = 0;

	if (local_group)
		balance_cpu = group_first_cpu(group);

	/* Tally up the load of all CPUs in the group */
	max_cpu_load = 0;
	min_cpu_load = ~0UL;
	max_nr_running = 0;

	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
		struct rq *rq = cpu_rq(i);

		/* Bias balancing toward cpus of our domain */
		if (local_group) {
			if (idle_cpu(i) && !first_idle_cpu) {
				first_idle_cpu = 1;
				balance_cpu = i;
			}

			load = target_load(i, load_idx);
		} else {
			load = source_load(i, load_idx);
			if (load > max_cpu_load) {
				max_cpu_load = load;
				max_nr_running = rq->nr_running;
			}
			if (min_cpu_load > load)
				min_cpu_load = load;
		}

		sgs->group_load += load;
		sgs->sum_nr_running += rq->nr_running;
		sgs->sum_weighted_load += weighted_cpuload(i);
		if (idle_cpu(i))
			sgs->idle_cpus++;
	}

	/*
	 * First idle cpu or the first cpu(busiest) in this sched group
	 * is eligible for doing load balancing at this and above
	 * domains. In the newly idle case, we will allow all the cpu's
	 * to do the newly idle load balance.
	 */
	if (idle != CPU_NEWLY_IDLE && local_group) {
		if (balance_cpu != this_cpu) {
			*balance = 0;
			return;
		}
		update_group_power(sd, this_cpu);
	}

	/* Adjust by relative CPU power of the group */
	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;

	/*
	 * Consider the group unbalanced when the imbalance is larger
	 * than the average weight of a task.
	 *
	 * APZ: with cgroup the avg task weight can vary wildly and
	 * might not be a suitable number - should we keep a
	 * normalized nr_running number somewhere that negates
	 * the hierarchy?
	 */
	if (sgs->sum_nr_running)
		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;

	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
		sgs->group_imb = 1;

	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
						SCHED_POWER_SCALE);
	if (!sgs->group_capacity)
		sgs->group_capacity = fix_small_capacity(sd, group);
	sgs->group_weight = group->group_weight;

	if (sgs->group_capacity > sgs->sum_nr_running)
		sgs->group_has_capacity = 1;
}
/**
 * update_sd_pick_busiest - return 1 on busiest group
 * @sd: sched_domain whose statistics are to be checked
 * @sds: sched_domain statistics
 * @sg: sched_group candidate to be checked for being the busiest
 * @sgs: sched_group statistics
 * @this_cpu: the current cpu
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
 */
static bool update_sd_pick_busiest(struct sched_domain *sd,
				   struct sd_lb_stats *sds,
				   struct sched_group *sg,
				   struct sg_lb_stats *sgs,
				   int this_cpu)
{
	if (sgs->avg_load <= sds->max_load)
		return false;

	if (sgs->sum_nr_running > sgs->group_capacity)
		return true;

	if (sgs->group_imb)
		return true;

	/*
	 * ASYM_PACKING needs to move all the work to the lowest
	 * numbered CPUs in the group, therefore mark all groups
	 * higher than ourself as busy.
	 */
	if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
	    this_cpu < group_first_cpu(sg)) {
		if (!sds->busiest)
			return true;

		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
			return true;
	}

	return false;
}
/**
 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 * @sd: sched_domain whose statistics are to be updated.
 * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
 * @cpus: Set of cpus considered for load balancing.
 * @balance: Should we balance.
 * @sds: variable to hold the statistics for this sched_domain.
 */
static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
			enum cpu_idle_type idle, const struct cpumask *cpus,
			int *balance, struct sd_lb_stats *sds)
{
	struct sched_domain *child = sd->child;
	struct sched_group *sg = sd->groups;
	struct sg_lb_stats sgs;
	int load_idx, prefer_sibling = 0;

	if (child && child->flags & SD_PREFER_SIBLING)
		prefer_sibling = 1;

	init_sd_power_savings_stats(sd, sds, idle);
	load_idx = get_sd_load_idx(sd, idle);

	do {
		int local_group;

		local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
		memset(&sgs, 0, sizeof(sgs));
		update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
				   local_group, cpus, balance, &sgs);

		if (local_group && !(*balance))
			return;

		sds->total_load += sgs.group_load;
		sds->total_pwr += sg->sgp->power;

		/*
		 * In case the child domain prefers tasks go to siblings
		 * first, lower the sg capacity to one so that we'll try
		 * and move all the excess tasks away. We lower the capacity
		 * of a group only if the local group has the capacity to fit
		 * these excess tasks, i.e. nr_running < group_capacity. The
		 * extra check prevents the case where you always pull from the
		 * heaviest group when it is already under-utilized (possible
		 * with a large weight task outweighing the tasks on the system).
		 */
		if (prefer_sibling && !local_group && sds->this_has_capacity)
			sgs.group_capacity = min(sgs.group_capacity, 1UL);

		if (local_group) {
			sds->this_load = sgs.avg_load;
			sds->this = sg;
			sds->this_nr_running = sgs.sum_nr_running;
			sds->this_load_per_task = sgs.sum_weighted_load;
			sds->this_has_capacity = sgs.group_has_capacity;
			sds->this_idle_cpus = sgs.idle_cpus;
		} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
			sds->max_load = sgs.avg_load;
			sds->busiest = sg;
			sds->busiest_nr_running = sgs.sum_nr_running;
			sds->busiest_idle_cpus = sgs.idle_cpus;
			sds->busiest_group_capacity = sgs.group_capacity;
			sds->busiest_load_per_task = sgs.sum_weighted_load;
			sds->busiest_has_capacity = sgs.group_has_capacity;
			sds->busiest_group_weight = sgs.group_weight;
			sds->group_imb = sgs.group_imb;
		}

		update_sd_power_savings_stats(sg, sds, local_group, &sgs);
		sg = sg->next;
	} while (sg != sd->groups);
}
/**
 * check_asym_packing - Check to see if the group is packed into the
 *			sched domain.
 *
 * This is primarily intended to be used at the sibling level.  Some
 * cores like POWER7 prefer to use lower numbered SMT threads.  In the
 * case of POWER7, it can move to lower SMT modes only when higher
 * threads are idle.  When in lower SMT modes, the threads will
 * perform better since they share less core resources.  Hence when we
 * have idle threads, we want them to be the higher ones.
 *
 * This packing function is run on idle threads.  It checks to see if
 * the busiest CPU in this domain (core in the P7 case) has a higher
 * CPU number than the packing function is being run on.  Here we are
 * assuming a lower CPU number will be equivalent to a lower SMT thread
 * number.
 *
 * Returns 1 when packing is required and a task should be moved to
 * this CPU.  The amount of the imbalance is returned in *imbalance.
 *
 * @sd: The sched_domain whose packing is to be checked.
 * @sds: Statistics of the sched_domain which is to be packed
 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
 * @imbalance: returns amount of imbalance due to packing.
 */
static int check_asym_packing(struct sched_domain *sd,
			      struct sd_lb_stats *sds,
			      int this_cpu, unsigned long *imbalance)
{
	int busiest_cpu;

	if (!(sd->flags & SD_ASYM_PACKING))
		return 0;

	if (!sds->busiest)
		return 0;

	busiest_cpu = group_first_cpu(sds->busiest);
	if (this_cpu > busiest_cpu)
		return 0;

	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
				       SCHED_POWER_SCALE);
	return 1;
}
/**
 * fix_small_imbalance - Calculate the minor imbalance that exists
 *			amongst the groups of a sched_domain, during
 *			load balancing.
 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
 * @imbalance: Variable to store the imbalance.
 */
static inline void fix_small_imbalance(struct sd_lb_stats *sds,
				int this_cpu, unsigned long *imbalance)
{
	unsigned long tmp, pwr_now = 0, pwr_move = 0;
	unsigned int imbn = 2;
	unsigned long scaled_busy_load_per_task;

	if (sds->this_nr_running) {
		sds->this_load_per_task /= sds->this_nr_running;
		if (sds->busiest_load_per_task >
				sds->this_load_per_task)
			imbn = 1;
	} else
		sds->this_load_per_task =
			cpu_avg_load_per_task(this_cpu);

	scaled_busy_load_per_task = sds->busiest_load_per_task
					 * SCHED_POWER_SCALE;
	scaled_busy_load_per_task /= sds->busiest->sgp->power;

	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
			(scaled_busy_load_per_task * imbn)) {
		*imbalance = sds->busiest_load_per_task;
		return;
	}

	/*
	 * OK, we don't have enough imbalance to justify moving tasks,
	 * however we may be able to increase total CPU power used by
	 * moving them.
	 */

	pwr_now += sds->busiest->sgp->power *
			min(sds->busiest_load_per_task, sds->max_load);
	pwr_now += sds->this->sgp->power *
			min(sds->this_load_per_task, sds->this_load);
	pwr_now /= SCHED_POWER_SCALE;

	/* Amount of load we'd subtract */
	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
		sds->busiest->sgp->power;
	if (sds->max_load > tmp)
		pwr_move += sds->busiest->sgp->power *
			min(sds->busiest_load_per_task, sds->max_load - tmp);

	/* Amount of load we'd add */
	if (sds->max_load * sds->busiest->sgp->power <
		sds->busiest_load_per_task * SCHED_POWER_SCALE)
		tmp = (sds->max_load * sds->busiest->sgp->power) /
			sds->this->sgp->power;
	else
		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
			sds->this->sgp->power;
	pwr_move += sds->this->sgp->power *
			min(sds->this_load_per_task, sds->this_load + tmp);
	pwr_move /= SCHED_POWER_SCALE;

	/* Move if we gain throughput */
	if (pwr_move > pwr_now)
		*imbalance = sds->busiest_load_per_task;
}
/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 *			 groups of a given sched_domain during load balance.
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 * @this_cpu: Cpu for which currently load balance is being performed.
 * @imbalance: The variable to store the imbalance.
 */
static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
		unsigned long *imbalance)
{
	unsigned long max_pull, load_above_capacity = ~0UL;

	sds->busiest_load_per_task /= sds->busiest_nr_running;
	if (sds->group_imb) {
		sds->busiest_load_per_task =
			min(sds->busiest_load_per_task, sds->avg_load);
	}

	/*
	 * In the presence of smp nice balancing, certain scenarios can have
	 * max load less than avg load (as we skip the groups at or below
	 * its cpu_power, while calculating max_load..)
	 */
	if (sds->max_load < sds->avg_load) {
		*imbalance = 0;
		return fix_small_imbalance(sds, this_cpu, imbalance);
	}

	if (!sds->group_imb) {
		/*
		 * Don't want to pull so many tasks that a group would go idle.
		 */
		load_above_capacity = (sds->busiest_nr_running -
						sds->busiest_group_capacity);

		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);

		load_above_capacity /= sds->busiest->sgp->power;
	}

	/*
	 * We're trying to get all the cpus to the average_load, so we don't
	 * want to push ourselves above the average load, nor do we wish to
	 * reduce the max loaded cpu below the average load. At the same time,
	 * we also don't want to reduce the group load below the group capacity
	 * (so that we can implement power-savings policies etc). Thus we look
	 * for the minimum possible imbalance.
	 * Be careful of negative numbers as they'll appear as very large values
	 * with unsigned longs.
	 */
	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);

	/* How much load to actually move to equalise the imbalance */
	*imbalance = min(max_pull * sds->busiest->sgp->power,
		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
			/ SCHED_POWER_SCALE;

	/*
	 * if *imbalance is less than the average load per runnable task
	 * there is no guarantee that any tasks will be moved so we'll have
	 * a think about bumping its value to force at least one task to be
	 * moved
	 */
	if (*imbalance < sds->busiest_load_per_task)
		return fix_small_imbalance(sds, this_cpu, imbalance);
}
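/*
 * Illustrative sketch (not part of the original scheduler source): the min()
 * above keeps the pulled load from overshooting the average on either side.
 * With hypothetical, already power-scaled loads:
 */
#if 0	/* example only, never compiled */
static unsigned long example_imbalance(void)
{
	unsigned long max_load = 3000, this_load = 1000, avg_load = 1800;
	unsigned long power = 1024;	/* both groups at SCHED_POWER_SCALE */

	/* don't pull the busiest below avg, don't push ourselves above it */
	unsigned long max_pull = max_load - avg_load;		/* 1200 */
	unsigned long room     = avg_load - this_load;		/*  800 */

	return min(max_pull * power, room * power) / 1024;	/*  800 */
}
#endif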
/******* find_busiest_group() helpers end here *********************/

/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance. If there isn't an imbalance, and
 * the user has opted for power-savings, it returns a group whose
 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
 * such a group exists.
 *
 * Also calculates the amount of weighted load which should be moved
 * to restore balance.
 *
 * @sd: The sched_domain whose busiest group is to be returned.
 * @this_cpu: The cpu for which load balancing is currently being performed.
 * @imbalance: Variable which stores amount of weighted load which should
 *		be moved to restore balance/put a group to idle.
 * @idle: The idle status of this_cpu.
 * @cpus: The set of CPUs under consideration for load-balancing.
 * @balance: Pointer to a variable indicating if this_cpu
 *	is the appropriate cpu to perform load balancing at this_level.
 *
 * Returns:	- the busiest group if imbalance exists.
 *		- If no imbalance and user has opted for power-savings balance,
 *		   return the least loaded group whose CPUs can be
 *		   put to idle by rebalancing its tasks onto our group.
 */
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
		   unsigned long *imbalance, enum cpu_idle_type idle,
		   const struct cpumask *cpus, int *balance)
{
	struct sd_lb_stats sds;

	memset(&sds, 0, sizeof(sds));

	/*
	 * Compute the various statistics relevant for load balancing at
	 * this level.
	 */
	update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);

	/*
	 * this_cpu is not the appropriate cpu to perform load balancing at
	 * this level.
	 */
	if (!(*balance))
		goto ret;

	if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
	    check_asym_packing(sd, &sds, this_cpu, imbalance))
		return sds.busiest;

	/* There is no busy sibling group to pull tasks from */
	if (!sds.busiest || sds.busiest_nr_running == 0)
		goto out_balanced;

	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;

	/*
	 * If the busiest group is imbalanced the below checks don't
	 * work because they assume all things are equal, which typically
	 * isn't true due to cpus_allowed constraints and the like.
	 */
	if (sds.group_imb)
		goto force_balance;

	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
	if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
			!sds.busiest_has_capacity)
		goto force_balance;

	/*
	 * If the local group is more busy than the selected busiest group
	 * don't try and pull any tasks.
	 */
	if (sds.this_load >= sds.max_load)
		goto out_balanced;

	/*
	 * Don't pull any tasks if this group is already above the domain
	 * average load.
	 */
	if (sds.this_load >= sds.avg_load)
		goto out_balanced;

	if (idle == CPU_IDLE) {
		/*
		 * This cpu is idle. If the busiest group load doesn't
		 * have more tasks than the number of available cpu's and
		 * there is no imbalance between this and the busiest group
		 * wrt idle cpu's, it is balanced.
		 */
		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
		    sds.busiest_nr_running <= sds.busiest_group_weight)
			goto out_balanced;
	} else {
		/*
		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
		 * imbalance_pct to be conservative.
		 */
		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
			goto out_balanced;
	}

force_balance:
	/* Looks like there is an imbalance. Compute it */
	calculate_imbalance(&sds, this_cpu, imbalance);
	return sds.busiest;

out_balanced:
	/*
	 * There is no obvious imbalance. But check if we can do some balancing
	 * to save power.
	 */
	if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
		return sds.busiest;
ret:
	*imbalance = 0;
	return NULL;
}
/*
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
static struct rq *
find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
		   enum cpu_idle_type idle, unsigned long imbalance,
		   const struct cpumask *cpus)
{
	struct rq *busiest = NULL, *rq;
	unsigned long max_load = 0;
	int i;

	for_each_cpu(i, sched_group_cpus(group)) {
		unsigned long power = power_of(i);
		unsigned long capacity = DIV_ROUND_CLOSEST(power,
							   SCHED_POWER_SCALE);
		unsigned long wl;

		if (!capacity)
			capacity = fix_small_capacity(sd, group);

		if (!cpumask_test_cpu(i, cpus))
			continue;

		rq = cpu_rq(i);
		wl = weighted_cpuload(i);

		/*
		 * When comparing with imbalance, use weighted_cpuload()
		 * which is not scaled with the cpu power.
		 */
		if (capacity && rq->nr_running == 1 && wl > imbalance)
			continue;

		/*
		 * For the load comparisons with the other cpu's, consider
		 * the weighted_cpuload() scaled with the cpu power, so that
		 * the load can be moved away from the cpu that is potentially
		 * running at a lower capacity.
		 */
		wl = (wl * SCHED_POWER_SCALE) / power;

		if (wl > max_load) {
			max_load = wl;
			busiest = rq;
		}
	}

	return busiest;
}
/*
 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 * so long as it is large enough.
 */
#define MAX_PINNED_INTERVAL	512

/* Working cpumask for load_balance and load_balance_newidle. */
DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
static int need_active_balance(struct sched_domain *sd, int idle,
			       int busiest_cpu, int this_cpu)
{
	if (idle == CPU_NEWLY_IDLE) {

		/*
		 * ASYM_PACKING needs to force migrate tasks from busy but
		 * higher numbered CPUs in order to pack all tasks in the
		 * lowest numbered CPUs.
		 */
		if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
			return 1;

		/*
		 * The only task running in a non-idle cpu can be moved to this
		 * cpu in an attempt to completely free up the other CPU
		 * package.
		 *
		 * The package power saving logic comes from
		 * find_busiest_group(). If there is no imbalance, then
		 * f_b_g() will return NULL. However when sched_mc={1,2} then
		 * f_b_g() will select a group from which a running task may be
		 * pulled to this cpu in order to make the other package idle.
		 * If there is no opportunity to make a package idle and if
		 * there is no imbalance, then f_b_g() will return NULL and no
		 * action will be taken in load_balance_newidle().
		 *
		 * Under normal task pull operation due to imbalance, there
		 * will be more than one task in the source run queue and
		 * move_tasks() will succeed.  ld_moved will be true and this
		 * active balance code will not be triggered.
		 */
		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
			return 0;
	}

	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}

static int active_load_balance_cpu_stop(void *data);
/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
 */
static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *balance)
{
	int ld_moved, lb_flags = 0, active_balance = 0;
	struct sched_group *group;
	unsigned long imbalance;
	struct rq *busiest;
	unsigned long flags;
	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);

	cpumask_copy(cpus, cpu_active_mask);

	schedstat_inc(sd, lb_count[idle]);

redo:
	group = find_busiest_group(sd, this_cpu, &imbalance, idle,
				   cpus, balance);

	if (*balance == 0)
		goto out_balanced;

	if (!group) {
		schedstat_inc(sd, lb_nobusyg[idle]);
		goto out_balanced;
	}

	busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
	if (!busiest) {
		schedstat_inc(sd, lb_nobusyq[idle]);
		goto out_balanced;
	}

	BUG_ON(busiest == this_rq);

	schedstat_add(sd, lb_imbalance[idle], imbalance);

	ld_moved = 0;
	if (busiest->nr_running > 1) {
		/*
		 * Attempt to move tasks. If find_busiest_group has found
		 * an imbalance but busiest->nr_running <= 1, the group is
		 * still unbalanced. ld_moved simply stays zero, so it is
		 * correctly treated as an imbalance.
		 */
		lb_flags |= LBF_ALL_PINNED;
		local_irq_save(flags);
		double_rq_lock(this_rq, busiest);
		ld_moved = move_tasks(this_rq, this_cpu, busiest,
				      imbalance, sd, idle, &lb_flags);
		double_rq_unlock(this_rq, busiest);
		local_irq_restore(flags);

		/*
		 * some other cpu did the load balance for us.
		 */
		if (ld_moved && this_cpu != smp_processor_id())
			resched_cpu(this_cpu);

		if (lb_flags & LBF_ABORT)
			goto out_balanced;

		if (lb_flags & LBF_NEED_BREAK) {
			lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
			if (lb_flags & LBF_ABORT)
				goto out_balanced;
			goto redo;
		}

		/* All tasks on this runqueue were pinned by CPU affinity */
		if (unlikely(lb_flags & LBF_ALL_PINNED)) {
			cpumask_clear_cpu(cpu_of(busiest), cpus);
			if (!cpumask_empty(cpus))
				goto redo;
			goto out_balanced;
		}
	}

	if (!ld_moved) {
		schedstat_inc(sd, lb_failed[idle]);
		/*
		 * Increment the failure counter only on periodic balance.
		 * We do not want newidle balance, which can be very
		 * frequent, to pollute the failure counter, causing
		 * excessive cache_hot migrations and active balances.
		 */
		if (idle != CPU_NEWLY_IDLE)
			sd->nr_balance_failed++;

		if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
			raw_spin_lock_irqsave(&busiest->lock, flags);

			/* don't kick the active_load_balance_cpu_stop,
			 * if the curr task on busiest cpu can't be
			 * moved to this_cpu
			 */
			if (!cpumask_test_cpu(this_cpu,
					tsk_cpus_allowed(busiest->curr))) {
				raw_spin_unlock_irqrestore(&busiest->lock,
							   flags);
				lb_flags |= LBF_ALL_PINNED;
				goto out_one_pinned;
			}

			/*
			 * ->active_balance synchronizes accesses to
			 * ->active_balance_work. Once set, it's cleared
			 * only after active load balance is finished.
			 */
			if (!busiest->active_balance) {
				busiest->active_balance = 1;
				busiest->push_cpu = this_cpu;
				active_balance = 1;
			}
			raw_spin_unlock_irqrestore(&busiest->lock, flags);

			if (active_balance)
				stop_one_cpu_nowait(cpu_of(busiest),
					active_load_balance_cpu_stop, busiest,
					&busiest->active_balance_work);

			/*
			 * We've kicked active balancing, reset the failure
			 * counter.
			 */
			sd->nr_balance_failed = sd->cache_nice_tries+1;
		}
	} else
		sd->nr_balance_failed = 0;

	if (likely(!active_balance)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/*
		 * If we've begun active balancing, start to back off. This
		 * case may not be covered by the all_pinned logic if there
		 * is only 1 task on the busy runqueue (because we don't call
		 * move_tasks).
		 */
		if (sd->balance_interval < sd->max_interval)
			sd->balance_interval *= 2;
	}

	goto out;

out_balanced:
	schedstat_inc(sd, lb_balanced[idle]);

	sd->nr_balance_failed = 0;

out_one_pinned:
	/* tune up the balancing interval */
	if (((lb_flags & LBF_ALL_PINNED) &&
			sd->balance_interval < MAX_PINNED_INTERVAL) ||
			(sd->balance_interval < sd->max_interval))
		sd->balance_interval *= 2;

	ld_moved = 0;
out:
	return ld_moved;
}
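/*
 * Illustrative backoff example (interval values assumed, not taken from a
 * specific topology): a domain whose balance_interval starts at 8 (ms) and
 * keeps finding only pinned tasks doubles the interval on each pass:
 * 8, 16, 32, ... up to MAX_PINNED_INTERVAL (512, i.e. roughly half a
 * second), instead of rescanning an unmovable runqueue on every attempt.
 * A later pass that actually moves tasks without active balancing snaps
 * the interval back to sd->min_interval.
 */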
/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
void idle_balance(int this_cpu, struct rq *this_rq)
{
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;

	this_rq->idle_stamp = this_rq->clock;

	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		return;

	/*
	 * Drop the rq->lock, but keep IRQ/preempt disabled.
	 */
	raw_spin_unlock(&this_rq->lock);

	update_shares(this_cpu);
	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
		int balance = 1;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		if (sd->flags & SD_BALANCE_NEWIDLE) {
			/* If we've pulled tasks over stop searching: */
			pulled_task = load_balance(this_cpu, this_rq,
						   sd, CPU_NEWLY_IDLE, &balance);
		}

		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
			next_balance = sd->last_balance + interval;
		if (pulled_task) {
			this_rq->idle_stamp = 0;
			break;
		}
	}
	rcu_read_unlock();

	raw_spin_lock(&this_rq->lock);

	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
		/*
		 * We are going idle. next_balance may be set based on
		 * a busy processor. So reset next_balance.
		 */
		this_rq->next_balance = next_balance;
	}
}
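/*
 * Illustrative note on the avg_idle cutoff above: with the default
 * sysctl_sched_migration_cost of 500000 ns (0.5 ms), a cpu whose recent
 * idle periods averaged only ~100 us skips newidle balancing entirely,
 * since pulling a task would likely cost more in migration overhead than
 * the idle gap it fills.
 */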
/*
 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
 */
static int active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;
	int busiest_cpu = cpu_of(busiest_rq);
	int target_cpu = busiest_rq->push_cpu;
	struct rq *target_rq = cpu_rq(target_cpu);
	struct sched_domain *sd;

	raw_spin_lock_irq(&busiest_rq->lock);

	/* make sure the requested cpu hasn't gone down in the meantime */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))
		goto out_unlock;

	/* Is there any task to move? */
	if (busiest_rq->nr_running <= 1)
		goto out_unlock;

	/*
	 * This condition is "impossible", if it occurs
	 * we need to fix it. Originally reported by
	 * Bjorn Helgaas on a 128-cpu setup.
	 */
	BUG_ON(busiest_rq == target_rq);

	/* move a task from busiest_rq to target_rq */
	double_lock_balance(busiest_rq, target_rq);

	/* Search for an sd spanning us and the target CPU. */
	rcu_read_lock();
	for_each_domain(target_cpu, sd) {
		if ((sd->flags & SD_LOAD_BALANCE) &&
		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
			break;
	}

	if (likely(sd)) {
		schedstat_inc(sd, alb_count);

		if (move_one_task(target_rq, target_cpu, busiest_rq,
				  sd, CPU_IDLE))
			schedstat_inc(sd, alb_pushed);
		else
			schedstat_inc(sd, alb_failed);
	}
	rcu_read_unlock();
	double_unlock_balance(busiest_rq, target_rq);
out_unlock:
	busiest_rq->active_balance = 0;
	raw_spin_unlock_irq(&busiest_rq->lock);
	return 0;
}
#ifdef CONFIG_NO_HZ
/*
 * idle load balancing details
 * - When one of the busy CPUs notices that an idle rebalance may be
 *   needed, it kicks the idle load balancer, which then does idle
 *   load balancing for all the idle CPUs.
 */
static struct {
	cpumask_var_t idle_cpus_mask;
	atomic_t nr_cpus;
	unsigned long next_balance;	/* in jiffy units */
} nohz ____cacheline_aligned;
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
 * lowest_flag_domain - Return lowest sched_domain containing flag.
 * @cpu:	The cpu whose lowest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the lowest sched_domain
 *		for the given cpu.
 *
 * Returns the lowest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd)
		if (sd->flags & flag)
			break;

	return sd;
}

/**
 * for_each_flag_domain - Iterates over sched_domains containing the flag.
 * @cpu:	The cpu whose domains we're iterating over.
 * @sd:		variable holding the value of the power_savings_sd
 *		for cpu.
 * @flag:	The flag to filter the sched_domains to be iterated.
 *
 * Iterates over all the scheduler domains for a given cpu that have the
 * 'flag' set, starting from the lowest sched_domain to the highest.
 */
#define for_each_flag_domain(cpu, sd, flag) \
	for (sd = lowest_flag_domain(cpu, flag); \
		(sd && (sd->flags & flag)); sd = sd->parent)
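/*
 * Usage sketch for the iterator above (mirrors find_new_ilb() below):
 *
 *	struct sched_domain *sd;
 *
 *	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
 *		// visits each domain level of 'cpu' that carries the flag,
 *		// from the lowest matching level up through its parents,
 *		// stopping at the first parent without the flag.
 *	}
 */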
/**
 * find_new_ilb - Finds the optimum idle load balancer for nomination.
 * @cpu:	The cpu which is nominating a new idle_load_balancer.
 *
 * Returns:	Returns the id of the idle load balancer if it exists,
 *		Else, returns >= nr_cpu_ids.
 *
 * This algorithm picks the idle load balancer such that it belongs to a
 * semi-idle powersavings sched_domain. The idea is to try and avoid
 * completely idle packages/cores just for the purpose of idle load balancing
 * when there are other idle cpus which are better suited for that job.
 */
static int find_new_ilb(int cpu)
{
	int ilb = cpumask_first(nohz.idle_cpus_mask);
	struct sched_group *ilbg;
	struct sched_domain *sd;

	/*
	 * Have idle load balancer selection from semi-idle packages only
	 * when power-aware load balancing is enabled
	 */
	if (!(sched_smt_power_savings || sched_mc_power_savings))
		goto out_done;

	/*
	 * Optimize for the case when we have no idle CPUs or only one
	 * idle CPU. Don't walk the sched_domain hierarchy in such cases
	 */
	if (cpumask_weight(nohz.idle_cpus_mask) < 2)
		goto out_done;

	rcu_read_lock();
	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
		ilbg = sd->groups;

		do {
			if (ilbg->group_weight !=
				atomic_read(&ilbg->sgp->nr_busy_cpus)) {
				ilb = cpumask_first_and(nohz.idle_cpus_mask,
							sched_group_cpus(ilbg));
				goto unlock;
			}

			ilbg = ilbg->next;

		} while (ilbg != sd->groups);
	}
unlock:
	rcu_read_unlock();

out_done:
	if (ilb < nr_cpu_ids && idle_cpu(ilb))
		return ilb;

	return nr_cpu_ids;
}
#else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
static inline int find_new_ilb(int call_cpu)
{
	return nr_cpu_ids;
}
#endif
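/*
 * Example of the selection above (illustrative topology): on a two-package
 * box where package-0 = {0,1} has cpu 0 busy and cpu 1 idle, and
 * package-1 = {2,3} is fully idle, package-0's group is semi-idle
 * (group_weight 2, nr_busy_cpus 1), so cpu 1 is nominated as the idle
 * load balancer rather than waking a cpu in the completely idle
 * package-1.
 */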
/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
 * CPU (if there is one).
 */
static void nohz_balancer_kick(int cpu)
{
	int ilb_cpu;

	nohz.next_balance++;

	ilb_cpu = find_new_ilb(cpu);

	if (ilb_cpu >= nr_cpu_ids)
		return;

	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
		return;
	/*
	 * Use smp_send_reschedule() instead of resched_cpu().
	 * This way we generate a sched IPI on the target cpu which
	 * is idle. And the softirq performing nohz idle load balance
	 * will be run before returning from the IPI.
	 */
	smp_send_reschedule(ilb_cpu);
	return;
}
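/*
 * Kick sequence, illustrated: a busy cpu whose tick sees nr_running >= 2
 * calls nohz_balancer_kick(); the chosen ilb_cpu's NOHZ_BALANCE_KICK bit
 * is set and the reschedule IPI wakes it; on return from the IPI the
 * pending SCHED_SOFTIRQ runs run_rebalance_domains(), whose call to
 * nohz_idle_balance() notices the bit and rebalances on behalf of all
 * tickless idle cpus.
 */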
static inline void clear_nohz_tick_stopped(int cpu)
{
	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
		atomic_dec(&nohz.nr_cpus);
		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
	}
}
static inline void set_cpu_sd_state_busy(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
		return;
	clear_bit(NOHZ_IDLE, nohz_flags(cpu));

	rcu_read_lock();
	for_each_domain(cpu, sd)
		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
	rcu_read_unlock();
}

void set_cpu_sd_state_idle(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
		return;
	set_bit(NOHZ_IDLE, nohz_flags(cpu));

	rcu_read_lock();
	for_each_domain(cpu, sd)
		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
	rcu_read_unlock();
}
/*
 * This routine will record that this cpu is going idle with tick stopped.
 * This info will be used in performing idle load balancing in the future.
 */
void select_nohz_load_balancer(int stop_tick)
{
	int cpu = smp_processor_id();

	/*
	 * If this cpu is going down, then nothing needs to be done.
	 */
	if (!cpu_active(cpu))
		return;

	if (stop_tick) {
		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
			return;

		cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
		atomic_inc(&nohz.nr_cpus);
		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
	}
	return;
}
static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		clear_nohz_tick_stopped(smp_processor_id());
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif /* CONFIG_NO_HZ */
static DEFINE_SPINLOCK(balancing);

static unsigned long __read_mostly max_load_balance_interval = HZ/10;

/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}
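/*
 * Worked example for the scaling above: with HZ = 250 and 16 cpus online,
 * max_load_balance_interval = 250 * 16 / 10 = 400 jiffies (1.6 s), so the
 * busy-factor-scaled interval computed in rebalance_domains() below is
 * clamped to at most 1.6 s on that machine, rather than the single-cpu
 * HZ/10 cap.
 */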
/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in arch_init_sched_domains.
 */
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
	int balance = 1;
	struct rq *rq = cpu_rq(cpu);
	unsigned long interval;
	struct sched_domain *sd;
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize;

	update_shares(cpu);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		interval = sd->balance_interval;
		if (idle != CPU_IDLE)
			interval *= sd->busy_factor;

		/* scale ms to jiffies */
		interval = msecs_to_jiffies(interval);
		interval = clamp(interval, 1UL, max_load_balance_interval);

		need_serialize = sd->flags & SD_SERIALIZE;

		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over, so we're no
				 * longer idle.
				 */
				idle = CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!balance)
			break;
	}
	rcu_read_unlock();

	/*
	 * next_balance will be updated only when there is a need.
	 * When the cpu is attached to a null domain, for example, it will
	 * not be updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}
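/*
 * Worked example for the interval computation above (values assumed for
 * illustration): a domain with balance_interval = 64 ms and busy_factor =
 * 32 is rebalanced every 64 ms while the cpu is idle, but only every
 * 64 * 32 = 2048 ms while it is busy; the clamp() above then caps the
 * result at max_load_balance_interval for the machine.
 */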
#ifdef CONFIG_NO_HZ
/*
 * In the CONFIG_NO_HZ case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
	struct rq *this_rq = cpu_rq(this_cpu);
	struct rq *rq;
	int balance_cpu;

	if (idle != CPU_IDLE ||
	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
		goto end;

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. Next load
		 * balancing owner will pick it up.
		 */
		if (need_resched())
			break;

		raw_spin_lock_irq(&this_rq->lock);
		update_rq_clock(this_rq);
		update_cpu_load(this_rq);
		raw_spin_unlock_irq(&this_rq->lock);

		rebalance_domains(balance_cpu, CPU_IDLE);

		rq = cpu_rq(balance_cpu);
		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
end:
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}
/*
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle cpu in the system:
 *   - This rq has more than one task.
 *   - At any scheduler domain level, this cpu's scheduler group has multiple
 *     busy cpus exceeding the group's power.
 *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
 *     domain span are idle.
 */
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
	unsigned long now = jiffies;
	struct sched_domain *sd;

	if (unlikely(idle_cpu(cpu)))
		return 0;

	/*
	 * We may have been in ticked or tickless idle mode recently. At the
	 * first busy tick after returning from idle, we update the busy
	 * stats.
	 */
	set_cpu_sd_state_busy();
	clear_nohz_tick_stopped(cpu);

	/*
	 * None are in tickless mode and hence no need for NOHZ idle load
	 * balancing.
	 */
	if (likely(!atomic_read(&nohz.nr_cpus)))
		return 0;

	if (time_before(now, nohz.next_balance))
		return 0;

	if (rq->nr_running >= 2)
		goto need_kick;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		struct sched_group *sg = sd->groups;
		struct sched_group_power *sgp = sg->sgp;
		int nr_busy = atomic_read(&sgp->nr_busy_cpus);

		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
			goto need_kick_unlock;

		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
		    && (cpumask_first_and(nohz.idle_cpus_mask,
					  sched_domain_span(sd)) < cpu))
			goto need_kick_unlock;

		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
			break;
	}
	rcu_read_unlock();
	return 0;

need_kick_unlock:
	rcu_read_unlock();
need_kick:
	return 1;
}
#else
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif
/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_cpu, idle);
}
static inline int on_null_domain(int cpu)
{
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}
/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq, int cpu)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ
	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
		nohz_balancer_kick(cpu);
#endif
}
static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
}

#endif /* CONFIG_SMP */
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}
/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;

	if (unlikely(task_cpu(p) != this_cpu)) {
		rcu_read_lock();
		__set_task_cpu(p, this_cpu);
		rcu_read_unlock();
	}

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
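/*
 * Illustrative example of the normalization above: if the parent's cfs_rq
 * has min_vruntime = 1000000 and the child is placed at vruntime = 1003000,
 * the child leaves task_fork_fair() with a relative vruntime of 3000. When
 * wake_up_new_task() later enqueues it, possibly on another cpu, that
 * runqueue's own min_vruntime is added back, so the child is neither
 * boosted nor penalised by the offset between the two runqueues' clocks.
 */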
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it's
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime; if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause an 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}
}
/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	INIT_LIST_HEAD(&cfs_rq->tasks);
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep; sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	/*
	 * When !on_rq, the vruntime of the task has usually NOT been
	 * normalized. But there are some cases where it has already been
	 * normalized:
	 *
	 * - Moving a forked child which is waiting to be woken up by
	 *   wake_up_new_task().
	 * - Moving a task which has been woken up by try_to_wake_up() and is
	 *   waiting to actually be woken up by sched_ttwu_pending().
	 *
	 * To prevent a boost or penalty in the new cfs_rq caused by the delta
	 * in min_vruntime between the two cfs_rqs, we skip the vruntime
	 * adjustment.
	 */
	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
		on_rq = 1;

	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq)
		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
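/*
 * Example of the skipped adjustment above: a freshly forked task that is
 * moved to a new cgroup before wake_up_new_task() runs still carries the
 * relative vruntime set by task_fork_fair() (sum_exec_runtime == 0), so
 * subtracting min_vruntime again here would double-normalize it; the
 * on_rq override above skips both the subtraction and the later addition
 * for that case.
 */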
void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}
void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
#ifdef CONFIG_SMP
	/* allow initial update_cfs_load() to truncate */
	cfs_rq->load_stamp = 1;
#endif
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	update_load_set(&se->load, 0);
	se->parent = parent;
}
static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
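/*
 * Usage example (illustrative): writing 2048 to a group's cpu.shares ends
 * up here as shares = 2048; after the clamp to [MIN_SHARES, MAX_SHARES]
 * the group competes with roughly twice the weight of a default group
 * (cpu.shares = 1024, i.e. NICE_0_LOAD), and update_cfs_shares() then
 * redistributes that weight over the group's per-cpu entities.
 */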
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}
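/*
 * Illustrative example: for a nice-0 task on a runqueue with non-zero
 * cfs.load.weight, sched_slice() hands back roughly an equal share of the
 * latency period; with the 6 ms default latency and 4 equally weighted
 * runnable tasks that is about 1.5 ms, which is converted to jiffies here
 * and reported through sched_rr_get_interval().
 */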
/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* CONFIG_SMP */
}