/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>
#include <linux/irq_work.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;
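/*
 * Illustrative note (not from the original source): RR_TIMESLICE is defined
 * in the scheduler headers as 100 ms worth of jiffies, so with HZ=1000 a
 * SCHED_RR task runs for 100 ticks before being moved to the tail of its
 * priority queue.  The value is tunable at runtime through
 * /proc/sys/kernel/sched_rr_timeslice_ms.
 */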
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
    struct rt_bandwidth *rt_b =
        container_of(timer, struct rt_bandwidth, rt_period_timer);
    int idle = 0;
    int overrun;

    raw_spin_lock(&rt_b->rt_runtime_lock);
    for (;;) {
        overrun = hrtimer_forward_now(timer, rt_b->rt_period);
        if (!overrun)
            break;

        raw_spin_unlock(&rt_b->rt_runtime_lock);
        idle = do_sched_rt_period_timer(rt_b, overrun);
        raw_spin_lock(&rt_b->rt_runtime_lock);
    }
    if (idle)
        rt_b->rt_period_active = 0;
    raw_spin_unlock(&rt_b->rt_runtime_lock);

    return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
    rt_b->rt_period = ns_to_ktime(period);
    rt_b->rt_runtime = runtime;

    raw_spin_lock_init(&rt_b->rt_runtime_lock);

    hrtimer_init(&rt_b->rt_period_timer,
             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    rt_b->rt_period_timer.function = sched_rt_period_timer;
}
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
        return;

    raw_spin_lock(&rt_b->rt_runtime_lock);
    if (!rt_b->rt_period_active) {
        rt_b->rt_period_active = 1;
        hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
        hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);
}
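/*
 * Illustrative note (not part of the original file): def_rt_bandwidth is
 * initialized from the global sysctls, which default to a 1,000,000 us
 * period and a 950,000 us runtime (/proc/sys/kernel/sched_rt_period_us and
 * sched_rt_runtime_us).  With those defaults RT tasks may consume at most
 * 0.95 s of CPU in every 1 s window; the remaining 5% is left for non-RT
 * tasks.  Setting sched_rt_runtime_us to -1 (RUNTIME_INF) disables
 * throttling entirely.
 */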
#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
static void push_irq_work_func(struct irq_work *work);
#endif
void init_rt_rq(struct rt_rq *rt_rq)
{
    struct rt_prio_array *array;
    int i;

    array = &rt_rq->active;
    for (i = 0; i < MAX_RT_PRIO; i++) {
        INIT_LIST_HEAD(array->queue + i);
        __clear_bit(i, array->bitmap);
    }
    /* delimiter for bitsearch: */
    __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
    rt_rq->highest_prio.curr = MAX_RT_PRIO;
    rt_rq->highest_prio.next = MAX_RT_PRIO;
    rt_rq->rt_nr_migratory = 0;
    rt_rq->overloaded = 0;
    plist_head_init(&rt_rq->pushable_tasks);

#ifdef HAVE_RT_PUSH_IPI
    rt_rq->push_flags = 0;
    rt_rq->push_cpu = nr_cpu_ids;
    raw_spin_lock_init(&rt_rq->push_lock);
    init_irq_work(&rt_rq->push_work, push_irq_work_func);
#endif
#endif /* CONFIG_SMP */
    /* We start in dequeued state, because no RT tasks are queued */
    rt_rq->rt_queued = 0;

    rt_rq->rt_time = 0;
    rt_rq->rt_throttled = 0;
    rt_rq->rt_runtime = 0;
    raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
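/*
 * Illustrative note (not from the original source): the active array has
 * MAX_RT_PRIO (100) list heads, one per RT priority level 0..99, plus a
 * guard bit at index MAX_RT_PRIO that is always set.  Because of that
 * sentinel, sched_find_first_bit() on an otherwise empty bitmap returns
 * MAX_RT_PRIO instead of scanning past the end, which is why an "empty"
 * rt_rq reports highest_prio.curr == MAX_RT_PRIO.
 */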
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
    WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
    return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
    return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
    return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
    struct rt_rq *rt_rq = rt_se->rt_rq;

    return rt_rq->rq;
}
void free_rt_sched_group(struct task_group *tg)
{
    int i;

    if (tg->rt_se)
        destroy_rt_bandwidth(&tg->rt_bandwidth);

    for_each_possible_cpu(i) {
        if (tg->rt_rq)
            kfree(tg->rt_rq[i]);
        if (tg->rt_se)
            kfree(tg->rt_se[i]);
    }

    kfree(tg->rt_rq);
    kfree(tg->rt_se);
}
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
              struct sched_rt_entity *rt_se, int cpu,
              struct sched_rt_entity *parent)
{
    struct rq *rq = cpu_rq(cpu);

    rt_rq->highest_prio.curr = MAX_RT_PRIO;
    rt_rq->rt_nr_boosted = 0;
    rt_rq->rq = rq;
    rt_rq->tg = tg;

    tg->rt_rq[cpu] = rt_rq;
    tg->rt_se[cpu] = rt_se;

    if (!rt_se)
        return;

    if (!parent)
        rt_se->rt_rq = &rq->rt;
    else
        rt_se->rt_rq = parent->my_q;

    rt_se->my_q = rt_rq;
    rt_se->parent = parent;
    INIT_LIST_HEAD(&rt_se->run_list);
}
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
    struct rt_rq *rt_rq;
    struct sched_rt_entity *rt_se;
    int i;

    tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
    if (!tg->rt_rq)
        goto err;
    tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
    if (!tg->rt_se)
        goto err;

    init_rt_bandwidth(&tg->rt_bandwidth,
            ktime_to_ns(def_rt_bandwidth.rt_period), 0);

    for_each_possible_cpu(i) {
        rt_rq = kzalloc_node(sizeof(struct rt_rq),
                     GFP_KERNEL, cpu_to_node(i));
        if (!rt_rq)
            goto err;

        rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                     GFP_KERNEL, cpu_to_node(i));
        if (!rt_se)
            goto err_free_rq;

        init_rt_rq(rt_rq);
        rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
        init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
    }

    return 1;

err_free_rq:
    kfree(rt_rq);
err:
    return 0;
}
#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
    return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
    return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
    struct task_struct *p = rt_task_of(rt_se);

    return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
    return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
    /* Try to pull RT tasks here if we lower this rq's prio */
    return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
    return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
    if (!rq->online)
        return;

    cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
    /*
     * Make sure the mask is visible before we set
     * the overload count. That is checked to determine
     * if we should look at the mask. It would be a shame
     * if we looked at the mask, but the mask was not
     * updated yet.
     *
     * Matched by the barrier in pull_rt_task().
     */
    smp_wmb();
    atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
    if (!rq->online)
        return;

    /* the order here really doesn't matter */
    atomic_dec(&rq->rd->rto_count);
    cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
static void update_rt_migration(struct rt_rq *rt_rq)
{
    if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
        if (!rt_rq->overloaded) {
            rt_set_overload(rq_of_rt_rq(rt_rq));
            rt_rq->overloaded = 1;
        }
    } else if (rt_rq->overloaded) {
        rt_clear_overload(rq_of_rt_rq(rt_rq));
        rt_rq->overloaded = 0;
    }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    struct task_struct *p;

    if (!rt_entity_is_task(rt_se))
        return;

    p = rt_task_of(rt_se);
    rt_rq = &rq_of_rt_rq(rt_rq)->rt;

    rt_rq->rt_nr_total++;
    if (p->nr_cpus_allowed > 1)
        rt_rq->rt_nr_migratory++;

    update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    struct task_struct *p;

    if (!rt_entity_is_task(rt_se))
        return;

    p = rt_task_of(rt_se);
    rt_rq = &rq_of_rt_rq(rt_rq)->rt;

    rt_rq->rt_nr_total--;
    if (p->nr_cpus_allowed > 1)
        rt_rq->rt_nr_migratory--;

    update_rt_migration(rt_rq);
}
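/*
 * Illustrative note (not from the original source): a runqueue is marked
 * "RT overloaded" only when both conditions hold - it has more than one
 * queued RT task (rt_nr_total > 1) and at least one of them is allowed to
 * run elsewhere (rt_nr_migratory).  Example: two SCHED_FIFO tasks queued,
 * one pinned to this CPU and one free to migrate, sets overloaded = 1 and
 * publishes the CPU in rd->rto_mask so that idle or lower-priority CPUs
 * can pull the migratable task.
 */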
static inline int has_pushable_tasks(struct rq *rq)
{
    return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
    if (!has_pushable_tasks(rq))
        return;

    queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
    queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
    plist_node_init(&p->pushable_tasks, p->prio);
    plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

    /* Update the highest prio pushable task */
    if (p->prio < rq->rt.highest_prio.next)
        rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

    /* Update the new highest prio pushable task */
    if (has_pushable_tasks(rq)) {
        p = plist_first_entry(&rq->rt.pushable_tasks,
                      struct task_struct, pushable_tasks);
        rq->rt.highest_prio.next = p->prio;
    } else
        rq->rt.highest_prio.next = MAX_RT_PRIO;
}
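/*
 * Illustrative note (not from the original source): pushable_tasks is a
 * priority-sorted plist keyed on p->prio, where a numerically lower prio
 * means a more urgent task.  plist_first_entry() therefore yields the most
 * urgent pushable task, and highest_prio.next caches its priority so other
 * CPUs can decide whether pulling from this runqueue is worthwhile without
 * taking its lock.
 */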
#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
    return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
    return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
    if (!rt_rq->tg)
        return RUNTIME_INF;

    return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
    return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
    do {
        tg = list_entry_rcu(tg->list.next,
            typeof(struct task_group), list);
    } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

    if (&tg->list == &task_groups)
        tg = NULL;

    return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
    for (iter = container_of(&task_groups, typeof(*iter), list);       \
        (iter = next_task_group(iter)) &&                               \
        (rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
    for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
    return rt_se->my_q;
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
    struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
    struct rq *rq = rq_of_rt_rq(rt_rq);
    struct sched_rt_entity *rt_se;

    int cpu = cpu_of(rq);

    rt_se = rt_rq->tg->rt_se[cpu];

    if (rt_rq->rt_nr_running) {
        if (!rt_se)
            enqueue_top_rt_rq(rt_rq);
        else if (!on_rt_rq(rt_se))
            enqueue_rt_entity(rt_se, false);

        if (rt_rq->highest_prio.curr < curr->prio)
            resched_curr(rq);
    }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
    struct sched_rt_entity *rt_se;
    int cpu = cpu_of(rq_of_rt_rq(rt_rq));

    rt_se = rt_rq->tg->rt_se[cpu];

    if (!rt_se)
        dequeue_top_rt_rq(rt_rq);
    else if (on_rt_rq(rt_se))
        dequeue_rt_entity(rt_se);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
    return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
    struct rt_rq *rt_rq = group_rt_rq(rt_se);
    struct task_struct *p;

    if (rt_rq)
        return !!rt_rq->rt_nr_boosted;

    p = rt_task_of(rt_se);
    return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
    return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
    return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
    return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
    return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
    return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
    return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
    for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
    for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
    return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    if (!rt_rq->rt_nr_running)
        return;

    enqueue_top_rt_rq(rt_rq);
    resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
    dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
    return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
    return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
    return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
    return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

    return (hrtimer_active(&rt_b->rt_period_timer) ||
        rt_rq->rt_time < rt_b->rt_runtime);
}
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
    struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
    int i, weight, more = 0;
    u64 rt_period;

    weight = cpumask_weight(rd->span);

    raw_spin_lock(&rt_b->rt_runtime_lock);
    rt_period = ktime_to_ns(rt_b->rt_period);
    for_each_cpu(i, rd->span) {
        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
        s64 diff;

        if (iter == rt_rq)
            continue;

        raw_spin_lock(&iter->rt_runtime_lock);
        /*
         * Either all rqs have inf runtime and there's nothing to steal
         * or __disable_runtime() below sets a specific rq to inf to
         * indicate it's been disabled and disallow stealing.
         */
        if (iter->rt_runtime == RUNTIME_INF)
            goto next;

        /*
         * From runqueues with spare time, take 1/n part of their
         * spare time, but no more than our period.
         */
        diff = iter->rt_runtime - iter->rt_time;
        if (diff > 0) {
            diff = div_u64((u64)diff, weight);
            if (rt_rq->rt_runtime + diff > rt_period)
                diff = rt_period - rt_rq->rt_runtime;
            iter->rt_runtime -= diff;
            rt_rq->rt_runtime += diff;
            more = 1;
            if (rt_rq->rt_runtime == rt_period) {
                raw_spin_unlock(&iter->rt_runtime_lock);
                break;
            }
        }
next:
        raw_spin_unlock(&iter->rt_runtime_lock);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);

    return more;
}
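/*
 * Worked example (illustrative, not from the original source): on a 4-CPU
 * root domain (weight = 4), suppose this rt_rq has exhausted its 950 ms
 * runtime while a neighbour has 400 ms of unused budget
 * (rt_runtime - rt_time = 400 ms).  The borrower takes 400/4 = 100 ms from
 * that neighbour, capped so its own rt_runtime never exceeds the 1000 ms
 * period; the lender's budget shrinks by the same amount, so the global RT
 * bandwidth is conserved.
 */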
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
    struct root_domain *rd = rq->rd;
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    if (unlikely(!scheduler_running))
        return;

    for_each_rt_rq(rt_rq, iter, rq) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        s64 want;
        int i;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * Either we're all inf and nobody needs to borrow, or we're
         * already disabled and thus have nothing to do, or we have
         * exactly the right amount of runtime to take out.
         */
        if (rt_rq->rt_runtime == RUNTIME_INF ||
                rt_rq->rt_runtime == rt_b->rt_runtime)
            goto balanced;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);

        /*
         * Calculate the difference between what we started out with
         * and what we currently have, that's the amount of runtime
         * we lent and now have to reclaim.
         */
        want = rt_b->rt_runtime - rt_rq->rt_runtime;

        /*
         * Greedy reclaim, take back as much as we can.
         */
        for_each_cpu(i, rd->span) {
            struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
            s64 diff;

            /*
             * Can't reclaim from ourselves or disabled runqueues.
             */
            if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                continue;

            raw_spin_lock(&iter->rt_runtime_lock);
            if (want > 0) {
                diff = min_t(s64, iter->rt_runtime, want);
                iter->rt_runtime -= diff;
                want -= diff;
            } else {
                iter->rt_runtime -= want;
                want -= want;
            }
            raw_spin_unlock(&iter->rt_runtime_lock);

            if (!want)
                break;
        }

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * We cannot be left wanting - that would mean some runtime
         * leaked out of the system.
         */
        BUG_ON(want);
balanced:
        /*
         * Disable all the borrow logic by pretending we have inf
         * runtime - in which case borrowing doesn't make sense.
         */
        rt_rq->rt_runtime = RUNTIME_INF;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        /* Make rt_rq available for pick_next_task() */
        sched_rt_rq_enqueue(rt_rq);
    }
}
static void __enable_runtime(struct rq *rq)
{
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    if (unlikely(!scheduler_running))
        return;

    /*
     * Reset each runqueue's bandwidth settings
     */
    for_each_rt_rq(rt_rq, iter, rq) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = rt_b->rt_runtime;
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
    }
}
static int balance_runtime(struct rt_rq *rt_rq)
{
    int more = 0;

    if (!sched_feat(RT_RUNTIME_SHARE))
        return more;

    if (rt_rq->rt_time > rt_rq->rt_runtime) {
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        more = do_balance_runtime(rt_rq);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
    }

    return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
    return 0;
}
#endif /* CONFIG_SMP */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
    int i, idle = 1, throttled = 0;
    const struct cpumask *span;

    span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * FIXME: isolated CPUs should really leave the root task group,
     * whether they are isolcpus or were isolated via cpusets, lest
     * the timer run on a CPU which does not service all runqueues,
     * potentially leaving other CPUs indefinitely throttled.  If
     * isolation is really required, the user will turn the throttle
     * off to kill the perturbations it causes anyway.  Meanwhile,
     * this maintains functionality for boot and/or troubleshooting.
     */
    if (rt_b == &root_task_group.rt_bandwidth)
        span = cpu_online_mask;
#endif
    for_each_cpu(i, span) {
        int enqueue = 0;
        struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
        struct rq *rq = rq_of_rt_rq(rt_rq);

        raw_spin_lock(&rq->lock);
        if (rt_rq->rt_time) {
            u64 runtime;

            raw_spin_lock(&rt_rq->rt_runtime_lock);
            if (rt_rq->rt_throttled)
                balance_runtime(rt_rq);
            runtime = rt_rq->rt_runtime;
            rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
            if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                rt_rq->rt_throttled = 0;
                enqueue = 1;

                /*
                 * When we're idle and a woken (rt) task is
                 * throttled check_preempt_curr() will set
                 * skip_update and the time between the wakeup
                 * and this unthrottle will get accounted as
                 * rt_time.
                 */
                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                    rq_clock_skip_update(rq, false);
            }
            if (rt_rq->rt_time || rt_rq->rt_nr_running)
                idle = 0;
            raw_spin_unlock(&rt_rq->rt_runtime_lock);
        } else if (rt_rq->rt_nr_running) {
            idle = 0;
            if (!rt_rq_throttled(rt_rq))
                enqueue = 1;
        }
        if (rt_rq->rt_throttled)
            throttled = 1;

        if (enqueue)
            sched_rt_rq_enqueue(rt_rq);
        raw_spin_unlock(&rq->lock);
    }

    if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
        return 1;

    return idle;
}
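/*
 * Worked example (illustrative, not from the original source): with a 1 s
 * period and 950 ms runtime, a throttled rt_rq that accumulated
 * rt_time = 1200 ms is replenished here as
 * rt_time -= min(1200 ms, overrun * 950 ms); one missed period (overrun = 1)
 * leaves 250 ms of debt, and since 250 ms < 950 ms the queue is unthrottled
 * and re-enqueued for pick_next_task().
 */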
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
    struct rt_rq *rt_rq = group_rt_rq(rt_se);

    if (rt_rq)
        return rt_rq->highest_prio.curr;
#endif

    return rt_task_of(rt_se)->prio;
}
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
    u64 runtime = sched_rt_runtime(rt_rq);

    if (rt_rq->rt_throttled)
        return rt_rq_throttled(rt_rq);

    if (runtime >= sched_rt_period(rt_rq))
        return 0;

    balance_runtime(rt_rq);
    runtime = sched_rt_runtime(rt_rq);
    if (runtime == RUNTIME_INF)
        return 0;

    if (rt_rq->rt_time > runtime) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        /*
         * Don't actually throttle groups that have no runtime assigned
         * but accrue some time due to boosting.
         */
        if (likely(rt_b->rt_runtime)) {
            rt_rq->rt_throttled = 1;
            printk_deferred_once("sched: RT throttling activated\n");
        } else {
            /*
             * In case we did anyway, make it go away,
             * replenishment is a joke, since it will replenish us
             * with exactly 0 ns.
             */
            rt_rq->rt_time = 0;
        }

        if (rt_rq_throttled(rt_rq)) {
            sched_rt_rq_dequeue(rt_rq);
            return 1;
        }
    }

    return 0;
}
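/*
 * Illustrative note (not from the original source): rt_rq_throttled() in
 * the group-scheduling case also checks rt_nr_boosted, so a group that is
 * over its budget but contains a task boosted by priority inheritance
 * (e.g. it holds an rt_mutex a higher-priority task is blocked on) is not
 * dequeued here; throttling it would extend the priority inversion the
 * boost is meant to resolve.
 */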
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
    struct task_struct *curr = rq->curr;
    struct sched_rt_entity *rt_se = &curr->rt;
    u64 delta_exec;

    if (curr->sched_class != &rt_sched_class)
        return;

    delta_exec = rq_clock_task(rq) - curr->se.exec_start;
    if (unlikely((s64)delta_exec <= 0))
        return;

    schedstat_set(curr->se.statistics.exec_max,
              max(curr->se.statistics.exec_max, delta_exec));

    curr->se.sum_exec_runtime += delta_exec;
    account_group_exec_runtime(curr, delta_exec);

    curr->se.exec_start = rq_clock_task(rq);
    cpuacct_charge(curr, delta_exec);

    sched_rt_avg_update(rq, delta_exec);

    if (!rt_bandwidth_enabled())
        return;

    for_each_sched_rt_entity(rt_se) {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

        if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
            raw_spin_lock(&rt_rq->rt_runtime_lock);
            rt_rq->rt_time += delta_exec;
            if (sched_rt_runtime_exceeded(rt_rq))
                resched_curr(rq);
            raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
    }
}
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    BUG_ON(&rq->rt != rt_rq);

    if (!rt_rq->rt_queued)
        return;

    BUG_ON(!rq->nr_running);

    sub_nr_running(rq, rt_rq->rt_nr_running);
    rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    BUG_ON(&rq->rt != rt_rq);

    if (rt_rq->rt_queued)
        return;
    if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
        return;

    add_nr_running(rq, rt_rq->rt_nr_running);
    rt_rq->rt_queued = 1;
}
#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * Change rq's cpupri only if rt_rq is the top queue.
     */
    if (&rq->rt != rt_rq)
        return;
#endif
    if (rq->online && prio < prev_prio)
        cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * Change rq's cpupri only if rt_rq is the top queue.
     */
    if (&rq->rt != rt_rq)
        return;
#endif
    if (rq->online && rt_rq->highest_prio.curr != prev_prio)
        cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
    int prev_prio = rt_rq->highest_prio.curr;

    if (prio < prev_prio)
        rt_rq->highest_prio.curr = prio;

    inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
    int prev_prio = rt_rq->highest_prio.curr;

    if (rt_rq->rt_nr_running) {

        WARN_ON(prio < prev_prio);

        /*
         * This may have been our highest task, and therefore
         * we may have some recomputation to do
         */
        if (prio == prev_prio) {
            struct rt_prio_array *array = &rt_rq->active;

            rt_rq->highest_prio.curr =
                sched_find_first_bit(array->bitmap);
        }

    } else
        rt_rq->highest_prio.curr = MAX_RT_PRIO;

    dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    if (rt_se_boosted(rt_se))
        rt_rq->rt_nr_boosted++;

    if (rt_rq->tg)
        start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    if (rt_se_boosted(rt_se))
        rt_rq->rt_nr_boosted--;

    WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
    struct rt_rq *group_rq = group_rt_rq(rt_se);

    if (group_rq)
        return group_rq->rt_nr_running;
    else
        return 1;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    int prio = rt_se_prio(rt_se);

    WARN_ON(!rt_prio(prio));
    rt_rq->rt_nr_running += rt_se_nr_running(rt_se);

    inc_rt_prio(rt_rq, prio);
    inc_rt_migration(rt_se, rt_rq);
    inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    WARN_ON(!rt_prio(rt_se_prio(rt_se)));
    WARN_ON(!rt_rq->rt_nr_running);
    rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);

    dec_rt_prio(rt_rq, rt_se_prio(rt_se));
    dec_rt_migration(rt_se, rt_rq);
    dec_rt_group(rt_se, rt_rq);
}
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
    struct rt_prio_array *array = &rt_rq->active;
    struct rt_rq *group_rq = group_rt_rq(rt_se);
    struct list_head *queue = array->queue + rt_se_prio(rt_se);

    /*
     * Don't enqueue the group if it's throttled, or when empty.
     * The latter is a consequence of the former when a child group
     * gets throttled and the current group doesn't have any other
     * active members.
     */
    if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
        return;

    if (head)
        list_add(&rt_se->run_list, queue);
    else
        list_add_tail(&rt_se->run_list, queue);
    __set_bit(rt_se_prio(rt_se), array->bitmap);

    inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
    struct rt_prio_array *array = &rt_rq->active;

    list_del_init(&rt_se->run_list);
    if (list_empty(array->queue + rt_se_prio(rt_se)))
        __clear_bit(rt_se_prio(rt_se), array->bitmap);

    dec_rt_tasks(rt_se, rt_rq);
}
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
    struct sched_rt_entity *back = NULL;

    for_each_sched_rt_entity(rt_se) {
        rt_se->back = back;
        back = rt_se;
    }

    dequeue_top_rt_rq(rt_rq_of_se(back));

    for (rt_se = back; rt_se; rt_se = rt_se->back) {
        if (on_rt_rq(rt_se))
            __dequeue_rt_entity(rt_se);
    }
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    dequeue_rt_stack(rt_se);
    for_each_sched_rt_entity(rt_se)
        __enqueue_rt_entity(rt_se, head);
    enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    dequeue_rt_stack(rt_se);

    for_each_sched_rt_entity(rt_se) {
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq && rt_rq->rt_nr_running)
            __enqueue_rt_entity(rt_se, false);
    }
    enqueue_top_rt_rq(&rq->rt);
}
/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
    struct sched_rt_entity *rt_se = &p->rt;

    if (flags & ENQUEUE_WAKEUP)
        rt_se->timeout = 0;

    enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

    if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
        enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
    struct sched_rt_entity *rt_se = &p->rt;

    update_curr_rt(rq);
    dequeue_rt_entity(rt_se);

    dequeue_pushable_task(rq, p);
}
/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
    if (on_rt_rq(rt_se)) {
        struct rt_prio_array *array = &rt_rq->active;
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        if (head)
            list_move(&rt_se->run_list, queue);
        else
            list_move_tail(&rt_se->run_list, queue);
    }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
    struct sched_rt_entity *rt_se = &p->rt;
    struct rt_rq *rt_rq;

    for_each_sched_rt_entity(rt_se) {
        rt_rq = rt_rq_of_se(rt_se);
        requeue_rt_entity(rt_rq, rt_se, head);
    }
}

static void yield_task_rt(struct rq *rq)
{
    requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
    struct task_struct *curr;
    struct rq *rq;

    /* For anything but wake ups, just return the task_cpu */
    if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
        goto out;

    rq = cpu_rq(cpu);

    rcu_read_lock();
    curr = READ_ONCE(rq->curr); /* unlocked access */

    /*
     * If the current task on @p's runqueue is an RT task, then
     * try to see if we can wake this RT task up on another
     * runqueue. Otherwise simply start this RT task
     * on its current runqueue.
     *
     * We want to avoid overloading runqueues. If the woken
     * task is a higher priority, then it will stay on this CPU
     * and the lower prio task should be moved to another CPU.
     * Even though this will probably make the lower prio task
     * lose its cache, we do not want to bounce a higher task
     * around just because it gave up its CPU, perhaps for a
     * lock?
     *
     * For equal prio tasks, we just let the scheduler sort it out.
     *
     * Otherwise, just let it ride on the affined RQ and the
     * post-schedule router will push the preempted task away
     *
     * This test is optimistic, if we get it wrong the load-balancer
     * will have to sort it out.
     */
    if (curr && unlikely(rt_task(curr)) &&
        (curr->nr_cpus_allowed < 2 ||
         curr->prio <= p->prio)) {
        int target = find_lowest_rq(p);

        /*
         * Don't bother moving it if the destination CPU is
         * not running a lower priority task.
         */
        if (target != -1 &&
            p->prio < cpu_rq(target)->rt.highest_prio.curr)
            cpu = target;
    }
    rcu_read_unlock();

out:
    return cpu;
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
    /*
     * Current can't be migrated, useless to reschedule,
     * let's hope p can move out.
     */
    if (rq->curr->nr_cpus_allowed == 1 ||
        !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
        return;

    /*
     * p is migratable, so let's not schedule it and
     * see if it is pushed or pulled somewhere else.
     */
    if (p->nr_cpus_allowed != 1
        && cpupri_find(&rq->rd->cpupri, p, NULL))
        return;

    /*
     * There appears to be other cpus that can accept
     * current and none to run 'p', so lets reschedule
     * to try and push current away:
     */
    requeue_task_rt(rq, p, 1);
    resched_curr(rq);
}

#endif /* CONFIG_SMP */
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
    if (p->prio < rq->curr->prio) {
        resched_curr(rq);
        return;
    }

#ifdef CONFIG_SMP
    /*
     * If:
     *
     * - the newly woken task is of equal priority to the current task
     * - the newly woken task is non-migratable while current is migratable
     * - current will be preempted on the next reschedule
     *
     * we should check to see if current can readily move to a different
     * cpu.  If so, we will reschedule to allow the push logic to try
     * to move current somewhere else, making room for our non-migratable
     * task.
     */
    if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
        check_preempt_equal_prio(rq, p);
#endif
}
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                           struct rt_rq *rt_rq)
{
    struct rt_prio_array *array = &rt_rq->active;
    struct sched_rt_entity *next = NULL;
    struct list_head *queue;
    int idx;

    idx = sched_find_first_bit(array->bitmap);
    BUG_ON(idx >= MAX_RT_PRIO);

    queue = array->queue + idx;
    next = list_entry(queue->next, struct sched_rt_entity, run_list);

    return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
    struct sched_rt_entity *rt_se;
    struct task_struct *p;
    struct rt_rq *rt_rq = &rq->rt;

    do {
        rt_se = pick_next_rt_entity(rq, rt_rq);
        BUG_ON(!rt_se);
        rt_rq = group_rt_rq(rt_se);
    } while (rt_rq);

    p = rt_task_of(rt_se);
    p->se.exec_start = rq_clock_task(rq);

    return p;
}
static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev)
{
    struct task_struct *p;
    struct rt_rq *rt_rq = &rq->rt;

    if (need_pull_rt_task(rq, prev)) {
        /*
         * This is OK, because current is on_cpu, which avoids it being
         * picked for load-balance and preemption/IRQs are still
         * disabled avoiding further scheduler activity on it and we're
         * being very careful to re-start the picking loop.
         */
        lockdep_unpin_lock(&rq->lock);
        pull_rt_task(rq);
        lockdep_pin_lock(&rq->lock);
        /*
         * pull_rt_task() can drop (and re-acquire) rq->lock; this
         * means a dl or stop task can slip in, in which case we need
         * to re-start task selection.
         */
        if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
                 rq->dl.dl_nr_running))
            return RETRY_TASK;
    }

    /*
     * We may dequeue prev's rt_rq in put_prev_task().
     * So, we update time before rt_nr_running check.
     */
    if (prev->sched_class == &rt_sched_class)
        update_curr_rt(rq);

    if (!rt_rq->rt_queued)
        return NULL;

    put_prev_task(rq, prev);

    p = _pick_next_task_rt(rq);

    /* The running task is never eligible for pushing */
    dequeue_pushable_task(rq, p);

    queue_push_tasks(rq);

    return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
    update_curr_rt(rq);

    /*
     * The previous task needs to be made eligible for pushing
     * if it is still active
     */
    if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
        enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
    if (!task_running(rq, p) &&
        cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
        return 1;
    return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
    struct plist_head *head = &rq->rt.pushable_tasks;
    struct task_struct *p;

    if (!has_pushable_tasks(rq))
        return NULL;

    plist_for_each_entry(p, head, pushable_tasks) {
        if (pick_rt_task(rq, p, cpu))
            return p;
    }

    return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
    struct sched_domain *sd;
    struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
    int this_cpu = smp_processor_id();
    int cpu      = task_cpu(task);

    /* Make sure the mask is initialized first */
    if (unlikely(!lowest_mask))
        return -1;

    if (task->nr_cpus_allowed == 1)
        return -1; /* No other targets possible */

    if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
        return -1; /* No targets found */

    /*
     * At this point we have built a mask of cpus representing the
     * lowest priority tasks in the system.  Now we want to elect
     * the best one based on our affinity and topology.
     *
     * We prioritize the last cpu that the task executed on since
     * it is most likely cache-hot in that location.
     */
    if (cpumask_test_cpu(cpu, lowest_mask))
        return cpu;

    /*
     * Otherwise, we consult the sched_domains span maps to figure
     * out which cpu is logically closest to our hot cache data.
     */
    if (!cpumask_test_cpu(this_cpu, lowest_mask))
        this_cpu = -1; /* Skip this_cpu opt if not among lowest */

    rcu_read_lock();
    for_each_domain(cpu, sd) {
        if (sd->flags & SD_WAKE_AFFINE) {
            int best_cpu;

            /*
             * "this_cpu" is cheaper to preempt than a
             * remote processor.
             */
            if (this_cpu != -1 &&
                cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                rcu_read_unlock();
                return this_cpu;
            }

            best_cpu = cpumask_first_and(lowest_mask,
                             sched_domain_span(sd));
            if (best_cpu < nr_cpu_ids) {
                rcu_read_unlock();
                return best_cpu;
            }
        }
    }
    rcu_read_unlock();

    /*
     * And finally, if there were no matches within the domains
     * just give the caller *something* to work with from the compatible
     * locations.
     */
    if (this_cpu != -1)
        return this_cpu;

    cpu = cpumask_any(lowest_mask);
    if (cpu < nr_cpu_ids)
        return cpu;
    return -1;
}
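/*
 * Illustrative note (not from the original source): cpupri keeps, per root
 * domain, a CPU mask for each priority class (idle, CFS, and one per RT
 * priority).  cpupri_find() fills lowest_mask with the CPUs whose current
 * task is lower priority than @task, so the selection above only picks
 * among already-acceptable CPUs: the task's previous CPU first (cache
 * warmth), then a CPU sharing a wake-affine domain, then anything else.
 */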
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
    struct rq *lowest_rq = NULL;
    int tries;
    int cpu;

    for (tries = 0; tries < RT_MAX_TRIES; tries++) {
        cpu = find_lowest_rq(task);

        if ((cpu == -1) || (cpu == rq->cpu))
            break;

        lowest_rq = cpu_rq(cpu);

        if (lowest_rq->rt.highest_prio.curr <= task->prio) {
            /*
             * Target rq has tasks of equal or higher priority,
             * retrying does not release any lock and is unlikely
             * to yield a different result.
             */
            lowest_rq = NULL;
            break;
        }

        /* if the prio of this runqueue changed, try again */
        if (double_lock_balance(rq, lowest_rq)) {
            /*
             * We had to unlock the run queue. In
             * the mean time, task could have
             * migrated already or had its affinity changed.
             * Also make sure that it wasn't scheduled on its rq.
             */
            if (unlikely(task_rq(task) != rq ||
                     !cpumask_test_cpu(lowest_rq->cpu,
                               tsk_cpus_allowed(task)) ||
                     task_running(rq, task) ||
                     !task_on_rq_queued(task))) {

                double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
                break;
            }
        }

        /* If this rq is still suitable use it. */
        if (lowest_rq->rt.highest_prio.curr > task->prio)
            break;

        /* try again */
        double_unlock_balance(rq, lowest_rq);
        lowest_rq = NULL;
    }

    return lowest_rq;
}
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
    struct task_struct *p;

    if (!has_pushable_tasks(rq))
        return NULL;

    p = plist_first_entry(&rq->rt.pushable_tasks,
                  struct task_struct, pushable_tasks);

    BUG_ON(rq->cpu != task_cpu(p));
    BUG_ON(task_current(rq, p));
    BUG_ON(p->nr_cpus_allowed <= 1);

    BUG_ON(!task_on_rq_queued(p));
    BUG_ON(!rt_task(p));

    return p;
}
/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
    struct task_struct *next_task;
    struct rq *lowest_rq;
    int ret = 0;

    if (!rq->rt.overloaded)
        return 0;

    next_task = pick_next_pushable_task(rq);
    if (!next_task)
        return 0;

retry:
    if (unlikely(next_task == rq->curr)) {
        WARN_ON(1);
        return 0;
    }

    /*
     * It's possible that the next_task slipped in of
     * higher priority than current. If that's the case
     * just reschedule current.
     */
    if (unlikely(next_task->prio < rq->curr->prio)) {
        resched_curr(rq);
        return 0;
    }

    /* We might release rq lock */
    get_task_struct(next_task);

    /* find_lock_lowest_rq locks the rq if found */
    lowest_rq = find_lock_lowest_rq(next_task, rq);
    if (!lowest_rq) {
        struct task_struct *task;
        /*
         * find_lock_lowest_rq releases rq->lock
         * so it is possible that next_task has migrated.
         *
         * We need to make sure that the task is still on the same
         * run-queue and is also still the next task eligible for
         * pushing.
         */
        task = pick_next_pushable_task(rq);
        if (task_cpu(next_task) == rq->cpu && task == next_task) {
            /*
             * The task hasn't migrated, and is still the next
             * eligible task, but we failed to find a run-queue
             * to push it to.  Do not retry in this case, since
             * other cpus will pull from us when ready.
             */
            goto out;
        }

        if (!task)
            /* No more tasks, just exit */
            goto out;

        /*
         * Something has shifted, try again.
         */
        put_task_struct(next_task);
        next_task = task;
        goto retry;
    }

    deactivate_task(rq, next_task, 0);
    set_task_cpu(next_task, lowest_rq->cpu);
    activate_task(lowest_rq, next_task, 0);
    ret = 1;

    resched_curr(lowest_rq);

    double_unlock_balance(rq, lowest_rq);

out:
    put_task_struct(next_task);

    return ret;
}

static void push_rt_tasks(struct rq *rq)
{
    /* push_rt_task will return true if it moved an RT */
    while (push_rt_task(rq))
        ;
}
#ifdef HAVE_RT_PUSH_IPI
/*
 * The search for the next cpu always starts at rq->cpu and ends
 * when we reach rq->cpu again. It will never return rq->cpu.
 * This returns the next cpu to check, or nr_cpu_ids if the loop
 * is complete.
 *
 * rq->rt.push_cpu holds the last cpu returned by this function,
 * or if this is the first instance, it must hold rq->cpu.
 */
static int rto_next_cpu(struct rq *rq)
{
    int prev_cpu = rq->rt.push_cpu;
    int cpu;

    cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);

    /*
     * If the previous cpu is less than the rq's CPU, then it already
     * passed the end of the mask, and has started from the beginning.
     * We end if the next CPU is greater or equal to rq's CPU.
     */
    if (prev_cpu < rq->cpu) {
        if (cpu >= rq->cpu)
            return nr_cpu_ids;

    } else if (cpu >= nr_cpu_ids) {
        /*
         * We passed the end of the mask, start at the beginning.
         * If the result is greater or equal to the rq's CPU, then
         * the loop is finished.
         */
        cpu = cpumask_first(rq->rd->rto_mask);
        if (cpu >= rq->cpu)
            return nr_cpu_ids;
    }
    rq->rt.push_cpu = cpu;

    /* Return cpu to let the caller know if the loop is finished or not */
    return cpu;
}

static int find_next_push_cpu(struct rq *rq)
{
    struct rq *next_rq;
    int cpu;

    while (1) {
        cpu = rto_next_cpu(rq);
        if (cpu >= nr_cpu_ids)
            break;
        next_rq = cpu_rq(cpu);

        /* Make sure the next rq can push to this rq */
        if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
            break;
    }

    return cpu;
}

#define RT_PUSH_IPI_EXECUTING   1
#define RT_PUSH_IPI_RESTART     2

static void tell_cpu_to_push(struct rq *rq)
{
    int cpu;

    if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
        raw_spin_lock(&rq->rt.push_lock);
        /* Make sure it's still executing */
        if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
            /*
             * Tell the IPI to restart the loop as things have
             * changed since it started.
             */
            rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
            raw_spin_unlock(&rq->rt.push_lock);
            return;
        }
        raw_spin_unlock(&rq->rt.push_lock);
    }

    /* When here, there's no IPI going around */

    rq->rt.push_cpu = rq->cpu;
    cpu = find_next_push_cpu(rq);
    if (cpu >= nr_cpu_ids)
        return;

    rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;

    irq_work_queue_on(&rq->rt.push_work, cpu);
}
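/*
 * Illustrative note (not from the original source): instead of this CPU
 * grabbing every overloaded runqueue's lock to pull tasks (which can cause
 * a thundering herd on large machines), the RT_PUSH_IPI scheme sends an
 * irq_work to the first overloaded CPU; that CPU pushes what it can and
 * forwards the IPI to the next CPU in rto_mask, walking the ring starting
 * at this rq's CPU until it wraps back around.
 */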
/* Called from hardirq context */
static void try_to_push_tasks(void *arg)
{
    struct rt_rq *rt_rq = arg;
    struct rq *rq, *src_rq;
    int this_cpu;
    int cpu;

    this_cpu = rt_rq->push_cpu;

    /* Paranoid check */
    BUG_ON(this_cpu != smp_processor_id());

    rq = cpu_rq(this_cpu);
    src_rq = rq_of_rt_rq(rt_rq);

again:
    if (has_pushable_tasks(rq)) {
        raw_spin_lock(&rq->lock);
        push_rt_task(rq);
        raw_spin_unlock(&rq->lock);
    }

    /* Pass the IPI to the next rt overloaded queue */
    raw_spin_lock(&rt_rq->push_lock);
    /*
     * If the source queue changed since the IPI went out,
     * we need to restart the search from that CPU again.
     */
    if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
        rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
        rt_rq->push_cpu = src_rq->cpu;
    }

    cpu = find_next_push_cpu(src_rq);

    if (cpu >= nr_cpu_ids)
        rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
    raw_spin_unlock(&rt_rq->push_lock);

    if (cpu >= nr_cpu_ids)
        return;

    /*
     * It is possible that a restart caused this CPU to be
     * chosen again. Don't bother with an IPI, just see if we
     * have more to push.
     */
    if (unlikely(cpu == rq->cpu))
        goto again;

    /* Try the next RT overloaded CPU */
    irq_work_queue_on(&rt_rq->push_work, cpu);
}

static void push_irq_work_func(struct irq_work *work)
{
    struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);

    try_to_push_tasks(rt_rq);
}
#endif /* HAVE_RT_PUSH_IPI */
static void pull_rt_task(struct rq *this_rq)
{
    int this_cpu = this_rq->cpu, cpu;
    bool resched = false;
    struct task_struct *p;
    struct rq *src_rq;

    if (likely(!rt_overloaded(this_rq)))
        return;

    /*
     * Match the barrier from rt_set_overload(); this guarantees that if we
     * see overloaded we must also see the rto_mask bit.
     */
    smp_rmb();

#ifdef HAVE_RT_PUSH_IPI
    if (sched_feat(RT_PUSH_IPI)) {
        tell_cpu_to_push(this_rq);
        return;
    }
#endif

    for_each_cpu(cpu, this_rq->rd->rto_mask) {
        if (this_cpu == cpu)
            continue;

        src_rq = cpu_rq(cpu);

        /*
         * Don't bother taking the src_rq->lock if the next highest
         * task is known to be lower-priority than our current task.
         * This may look racy, but if this value is about to go
         * logically higher, the src_rq will push this task away.
         * And if it's going logically lower, we do not care
         */
        if (src_rq->rt.highest_prio.next >=
            this_rq->rt.highest_prio.curr)
            continue;

        /*
         * We can potentially drop this_rq's lock in
         * double_lock_balance, and another CPU could
         * alter this_rq
         */
        double_lock_balance(this_rq, src_rq);

        /*
         * We can pull only a task, which is pushable
         * on its rq, and no others.
         */
        p = pick_highest_pushable_task(src_rq, this_cpu);

        /*
         * Do we have an RT task that preempts
         * the to-be-scheduled task?
         */
        if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
            WARN_ON(p == src_rq->curr);
            WARN_ON(!task_on_rq_queued(p));

            /*
             * There's a chance that p is higher in priority
             * than what's currently running on its cpu.
             * This is just that p is waking up and hasn't
             * had a chance to schedule. We only pull
             * p if it is lower in priority than the
             * current task on the run queue
             */
            if (p->prio < src_rq->curr->prio)
                goto skip;

            resched = true;

            deactivate_task(src_rq, p, 0);
            set_task_cpu(p, this_cpu);
            activate_task(this_rq, p, 0);
            /*
             * We continue with the search, just in
             * case there's an even higher prio task
             * in another runqueue. (low likelihood
             * but possible)
             */
        }
skip:
        double_unlock_balance(this_rq, src_rq);
    }

    if (resched)
        resched_curr(this_rq);
}
/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
    if (!task_running(rq, p) &&
        !test_tsk_need_resched(rq->curr) &&
        has_pushable_tasks(rq) &&
        p->nr_cpus_allowed > 1 &&
        (dl_task(rq->curr) || rt_task(rq->curr)) &&
        (rq->curr->nr_cpus_allowed < 2 ||
         rq->curr->prio <= p->prio))
        push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
                const struct cpumask *new_mask)
{
    struct rq *rq;
    int weight;

    BUG_ON(!rt_task(p));

    if (!task_on_rq_queued(p))
        return;

    weight = cpumask_weight(new_mask);

    /*
     * Only update if the process changes its state from whether it
     * can migrate or not.
     */
    if ((p->nr_cpus_allowed > 1) == (weight > 1))
        return;

    rq = task_rq(p);

    /*
     * The process used to be able to migrate OR it can now migrate
     */
    if (weight <= 1) {
        if (!task_current(rq, p))
            dequeue_pushable_task(rq, p);
        BUG_ON(!rq->rt.rt_nr_migratory);
        rq->rt.rt_nr_migratory--;
    } else {
        if (!task_current(rq, p))
            enqueue_pushable_task(rq, p);
        rq->rt.rt_nr_migratory++;
    }

    update_rt_migration(&rq->rt);
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
    if (rq->rt.overloaded)
        rt_set_overload(rq);

    __enable_runtime(rq);

    cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
    if (rq->rt.overloaded)
        rt_clear_overload(rq);

    __disable_runtime(rq);

    cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When we switch from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
    /*
     * If there are other RT tasks then we will reschedule
     * and the scheduling of the other RT tasks will handle
     * the balancing. But if we are the last RT task
     * we may need to handle the pulling of RT tasks
     * now.
     */
    if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
        return;

    queue_pull_task(rq);
}
void __init init_sched_rt_class(void)
{
    unsigned int i;

    for_each_possible_cpu(i) {
        zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
                    GFP_KERNEL, cpu_to_node(i));
    }
}
#endif /* CONFIG_SMP */
/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
    /*
     * If we are already running, then there's nothing
     * that needs to be done. But if we are not running
     * we may need to preempt the current running task.
     * If that current running task is also an RT task
     * then see if we can move to another run queue.
     */
    if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
        if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
            queue_push_tasks(rq);
#else
        if (p->prio < rq->curr->prio)
            resched_curr(rq);
#endif /* CONFIG_SMP */
    }
}
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
    if (!task_on_rq_queued(p))
        return;

    if (rq->curr == p) {
#ifdef CONFIG_SMP
        /*
         * If our priority decreases while running, we
         * may need to pull tasks to this runqueue.
         */
        if (oldprio < p->prio)
            queue_pull_task(rq);

        /*
         * If there's a higher priority task waiting to run
         * then reschedule.
         */
        if (p->prio > rq->rt.highest_prio.curr)
            resched_curr(rq);
#else
        /* For UP simply resched on drop of prio */
        if (oldprio < p->prio)
            resched_curr(rq);
#endif /* CONFIG_SMP */
    } else {
        /*
         * This task is not running, but if it is
         * greater than the current running task
         * then reschedule.
         */
        if (p->prio < rq->curr->prio)
            resched_curr(rq);
    }
}
static void watchdog(struct rq *rq, struct task_struct *p)
{
    unsigned long soft, hard;

    /* max may change after cur was read, this will be fixed next tick */
    soft = task_rlimit(p, RLIMIT_RTTIME);
    hard = task_rlimit_max(p, RLIMIT_RTTIME);

    if (soft != RLIM_INFINITY) {
        unsigned long next;

        if (p->rt.watchdog_stamp != jiffies) {
            p->rt.timeout++;
            p->rt.watchdog_stamp = jiffies;
        }

        next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
        if (p->rt.timeout > next)
            p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
    }
}
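/*
 * Illustrative note (not from the original source): this watchdog backs the
 * RLIMIT_RTTIME resource limit.  p->rt.timeout counts scheduler ticks the
 * task has run without a voluntary sleep; once it crosses the soft limit
 * (converted from microseconds to ticks above), the posix CPU timer code
 * delivers SIGXCPU, and exceeding the hard limit results in SIGKILL.
 */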
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
    struct sched_rt_entity *rt_se = &p->rt;

    update_curr_rt(rq);

    watchdog(rq, p);

    /*
     * RR tasks need a special form of timeslice management.
     * FIFO tasks have no timeslices.
     */
    if (p->policy != SCHED_RR)
        return;

    if (--p->rt.time_slice)
        return;

    p->rt.time_slice = sched_rr_timeslice;

    /*
     * Requeue to the end of queue if we (and all of our ancestors) are not
     * the only element on the queue
     */
    for_each_sched_rt_entity(rt_se) {
        if (rt_se->run_list.prev != rt_se->run_list.next) {
            requeue_task_rt(rq, p, 0);
            resched_curr(rq);
            return;
        }
    }
}
static void set_curr_task_rt(struct rq *rq)
{
    struct task_struct *p = rq->curr;

    p->se.exec_start = rq_clock_task(rq);

    /* The running task is never eligible for pushing */
    dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
    /*
     * Time slice is 0 for SCHED_FIFO tasks
     */
    if (task->policy == SCHED_RR)
        return sched_rr_timeslice;
    else
        return 0;
}
const struct sched_class rt_sched_class = {
    .next               = &fair_sched_class,
    .enqueue_task       = enqueue_task_rt,
    .dequeue_task       = dequeue_task_rt,
    .yield_task         = yield_task_rt,

    .check_preempt_curr = check_preempt_curr_rt,

    .pick_next_task     = pick_next_task_rt,
    .put_prev_task      = put_prev_task_rt,

#ifdef CONFIG_SMP
    .select_task_rq     = select_task_rq_rt,

    .set_cpus_allowed   = set_cpus_allowed_rt,
    .rq_online          = rq_online_rt,
    .rq_offline         = rq_offline_rt,
    .task_woken         = task_woken_rt,
    .switched_from      = switched_from_rt,
#endif

    .set_curr_task      = set_curr_task_rt,
    .task_tick          = task_tick_rt,

    .get_rr_interval    = get_rr_interval_rt,

    .prio_changed       = prio_changed_rt,
    .switched_to        = switched_to_rt,

    .update_curr        = update_curr_rt,
};
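/*
 * Usage sketch (illustrative, not part of this file): from userspace a task
 * opts into this scheduling class with sched_setscheduler(), e.g.
 *
 *     struct sched_param sp = { .sched_priority = 50 };
 *     if (sched_setscheduler(0, SCHED_RR, &sp))   // or SCHED_FIFO
 *         perror("sched_setscheduler");
 *
 * after which its enqueue/dequeue/pick paths are the rt_sched_class
 * callbacks registered above.
 */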
#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

void print_rt_stats(struct seq_file *m, int cpu)
{
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    rcu_read_lock();
    for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
        print_rt_rq(m, cpu, rt_rq);
    rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */