kernel/sched/rt.c
1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
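/*
 * For orientation, a minimal userspace sketch (not part of this file,
 * error handling kept to a bare minimum) of how a thread ends up being
 * managed by this class: it requests one of the two policies via
 * sched_setscheduler().
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* 0 == the calling thread; SCHED_RR works the same way. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	/* From here on, rt_sched_class below is responsible for us. */
	return 0;
}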
6 #include "sched.h"
8 #include <linux/slab.h>
9 #include <linux/irq_work.h>
11 int sched_rr_timeslice = RR_TIMESLICE;
13 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
15 struct rt_bandwidth def_rt_bandwidth;
17 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
19 struct rt_bandwidth *rt_b =
20 container_of(timer, struct rt_bandwidth, rt_period_timer);
21 int idle = 0;
22 int overrun;
24 raw_spin_lock(&rt_b->rt_runtime_lock);
25 for (;;) {
26 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27 if (!overrun)
28 break;
30 raw_spin_unlock(&rt_b->rt_runtime_lock);
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 raw_spin_lock(&rt_b->rt_runtime_lock);
34 if (idle)
35 rt_b->rt_period_active = 0;
36 raw_spin_unlock(&rt_b->rt_runtime_lock);
38 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
41 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
43 rt_b->rt_period = ns_to_ktime(period);
44 rt_b->rt_runtime = runtime;
46 raw_spin_lock_init(&rt_b->rt_runtime_lock);
48 hrtimer_init(&rt_b->rt_period_timer,
49 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50 rt_b->rt_period_timer.function = sched_rt_period_timer;
53 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
55 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56 return;
58 raw_spin_lock(&rt_b->rt_runtime_lock);
59 if (!rt_b->rt_period_active) {
60 rt_b->rt_period_active = 1;
62 * SCHED_DEADLINE updates the bandwidth, as a runaway
63 * RT task with a DL task could hog a CPU. But DL does
64 * not reset the period. If a deadline task was running
65 * without an RT task running, it can cause RT tasks to
66 * throttle when they start up. Kick the timer right away
67 * to update the period.
69 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
70 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
72 raw_spin_unlock(&rt_b->rt_runtime_lock);
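/*
 * A worked example of what start_rt_bandwidth() arms, assuming the stock
 * sysctl defaults (this file does not set them itself): with
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000,
 * def_rt_bandwidth lets RT tasks run for 950 ms out of every 1 s period,
 * reserving the remaining ~5% of each period for non-RT work. Writing -1
 * to sched_rt_runtime_us maps to RUNTIME_INF, in which case the check at
 * the top of start_rt_bandwidth() returns early and no period timer is
 * armed.
 */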
75 void init_rt_rq(struct rt_rq *rt_rq)
77 struct rt_prio_array *array;
78 int i;
80 array = &rt_rq->active;
81 for (i = 0; i < MAX_RT_PRIO; i++) {
82 INIT_LIST_HEAD(array->queue + i);
83 __clear_bit(i, array->bitmap);
85 /* delimiter for bitsearch: */
86 __set_bit(MAX_RT_PRIO, array->bitmap);
88 #if defined CONFIG_SMP
89 rt_rq->highest_prio.curr = MAX_RT_PRIO;
90 rt_rq->highest_prio.next = MAX_RT_PRIO;
91 rt_rq->rt_nr_migratory = 0;
92 rt_rq->overloaded = 0;
93 plist_head_init(&rt_rq->pushable_tasks);
94 #endif /* CONFIG_SMP */
95 /* We start in dequeued state, because no RT tasks are queued */
96 rt_rq->rt_queued = 0;
98 rt_rq->rt_time = 0;
99 rt_rq->rt_throttled = 0;
100 rt_rq->rt_runtime = 0;
101 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
104 #ifdef CONFIG_RT_GROUP_SCHED
105 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
107 hrtimer_cancel(&rt_b->rt_period_timer);
110 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
112 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
114 #ifdef CONFIG_SCHED_DEBUG
115 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
116 #endif
117 return container_of(rt_se, struct task_struct, rt);
120 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
122 return rt_rq->rq;
125 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
127 return rt_se->rt_rq;
130 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
132 struct rt_rq *rt_rq = rt_se->rt_rq;
134 return rt_rq->rq;
137 void free_rt_sched_group(struct task_group *tg)
139 int i;
141 if (tg->rt_se)
142 destroy_rt_bandwidth(&tg->rt_bandwidth);
144 for_each_possible_cpu(i) {
145 if (tg->rt_rq)
146 kfree(tg->rt_rq[i]);
147 if (tg->rt_se)
148 kfree(tg->rt_se[i]);
151 kfree(tg->rt_rq);
152 kfree(tg->rt_se);
155 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
156 struct sched_rt_entity *rt_se, int cpu,
157 struct sched_rt_entity *parent)
159 struct rq *rq = cpu_rq(cpu);
161 rt_rq->highest_prio.curr = MAX_RT_PRIO;
162 rt_rq->rt_nr_boosted = 0;
163 rt_rq->rq = rq;
164 rt_rq->tg = tg;
166 tg->rt_rq[cpu] = rt_rq;
167 tg->rt_se[cpu] = rt_se;
169 if (!rt_se)
170 return;
172 if (!parent)
173 rt_se->rt_rq = &rq->rt;
174 else
175 rt_se->rt_rq = parent->my_q;
177 rt_se->my_q = rt_rq;
178 rt_se->parent = parent;
179 INIT_LIST_HEAD(&rt_se->run_list);
182 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
184 struct rt_rq *rt_rq;
185 struct sched_rt_entity *rt_se;
186 int i;
188 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
189 if (!tg->rt_rq)
190 goto err;
191 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
192 if (!tg->rt_se)
193 goto err;
195 init_rt_bandwidth(&tg->rt_bandwidth,
196 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
198 for_each_possible_cpu(i) {
199 rt_rq = kzalloc_node(sizeof(struct rt_rq),
200 GFP_KERNEL, cpu_to_node(i));
201 if (!rt_rq)
202 goto err;
204 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
205 GFP_KERNEL, cpu_to_node(i));
206 if (!rt_se)
207 goto err_free_rq;
209 init_rt_rq(rt_rq);
210 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
211 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
214 return 1;
216 err_free_rq:
217 kfree(rt_rq);
218 err:
219 return 0;
222 #else /* CONFIG_RT_GROUP_SCHED */
224 #define rt_entity_is_task(rt_se) (1)
226 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
228 return container_of(rt_se, struct task_struct, rt);
231 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
233 return container_of(rt_rq, struct rq, rt);
236 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
238 struct task_struct *p = rt_task_of(rt_se);
240 return task_rq(p);
243 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
245 struct rq *rq = rq_of_rt_se(rt_se);
247 return &rq->rt;
250 void free_rt_sched_group(struct task_group *tg) { }
252 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
254 return 1;
256 #endif /* CONFIG_RT_GROUP_SCHED */
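/*
 * A usage note (the cgroup interface itself is implemented outside this
 * file): with CONFIG_RT_GROUP_SCHED, the per-group bandwidth allocated
 * above is what the v1 cpu controller exposes as cpu.rt_period_us and
 * cpu.rt_runtime_us. alloc_rt_sched_group() starts a new group with a
 * runtime of 0, so RT tasks in a fresh group are throttled until the
 * administrator explicitly grants it runtime.
 */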
258 #ifdef CONFIG_SMP
260 static void pull_rt_task(struct rq *this_rq);
262 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
264 /* Try to pull RT tasks here if we lower this rq's prio */
265 return rq->rt.highest_prio.curr > prev->prio;
268 static inline int rt_overloaded(struct rq *rq)
270 return atomic_read(&rq->rd->rto_count);
273 static inline void rt_set_overload(struct rq *rq)
275 if (!rq->online)
276 return;
278 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
280 * Make sure the mask is visible before we set
281 * the overload count. That is checked to determine
282 * if we should look at the mask. It would be a shame
283 * if we looked at the mask, but the mask was not
284 * updated yet.
286 * Matched by the barrier in pull_rt_task().
288 smp_wmb();
289 atomic_inc(&rq->rd->rto_count);
292 static inline void rt_clear_overload(struct rq *rq)
294 if (!rq->online)
295 return;
297 /* the order here really doesn't matter */
298 atomic_dec(&rq->rd->rto_count);
299 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
302 static void update_rt_migration(struct rt_rq *rt_rq)
304 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
305 if (!rt_rq->overloaded) {
306 rt_set_overload(rq_of_rt_rq(rt_rq));
307 rt_rq->overloaded = 1;
309 } else if (rt_rq->overloaded) {
310 rt_clear_overload(rq_of_rt_rq(rt_rq));
311 rt_rq->overloaded = 0;
315 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
317 struct task_struct *p;
319 if (!rt_entity_is_task(rt_se))
320 return;
322 p = rt_task_of(rt_se);
323 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
325 rt_rq->rt_nr_total++;
326 if (tsk_nr_cpus_allowed(p) > 1)
327 rt_rq->rt_nr_migratory++;
329 update_rt_migration(rt_rq);
332 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
334 struct task_struct *p;
336 if (!rt_entity_is_task(rt_se))
337 return;
339 p = rt_task_of(rt_se);
340 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
342 rt_rq->rt_nr_total--;
343 if (tsk_nr_cpus_allowed(p) > 1)
344 rt_rq->rt_nr_migratory--;
346 update_rt_migration(rt_rq);
349 static inline int has_pushable_tasks(struct rq *rq)
351 return !plist_head_empty(&rq->rt.pushable_tasks);
354 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
355 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
357 static void push_rt_tasks(struct rq *);
358 static void pull_rt_task(struct rq *);
360 static inline void queue_push_tasks(struct rq *rq)
362 if (!has_pushable_tasks(rq))
363 return;
365 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
368 static inline void queue_pull_task(struct rq *rq)
370 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
373 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
375 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
376 plist_node_init(&p->pushable_tasks, p->prio);
377 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
379 /* Update the highest prio pushable task */
380 if (p->prio < rq->rt.highest_prio.next)
381 rq->rt.highest_prio.next = p->prio;
384 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
386 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
388 /* Update the new highest prio pushable task */
389 if (has_pushable_tasks(rq)) {
390 p = plist_first_entry(&rq->rt.pushable_tasks,
391 struct task_struct, pushable_tasks);
392 rq->rt.highest_prio.next = p->prio;
393 } else
394 rq->rt.highest_prio.next = MAX_RT_PRIO;
397 #else
399 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
403 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
407 static inline
408 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
412 static inline
413 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
417 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
419 return false;
422 static inline void pull_rt_task(struct rq *this_rq)
426 static inline void queue_push_tasks(struct rq *rq)
429 #endif /* CONFIG_SMP */
431 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
432 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
434 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
436 return rt_se->on_rq;
439 #ifdef CONFIG_RT_GROUP_SCHED
441 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
443 if (!rt_rq->tg)
444 return RUNTIME_INF;
446 return rt_rq->rt_runtime;
449 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
451 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
454 typedef struct task_group *rt_rq_iter_t;
456 static inline struct task_group *next_task_group(struct task_group *tg)
458 do {
459 tg = list_entry_rcu(tg->list.next,
460 typeof(struct task_group), list);
461 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
463 if (&tg->list == &task_groups)
464 tg = NULL;
466 return tg;
469 #define for_each_rt_rq(rt_rq, iter, rq) \
470 for (iter = container_of(&task_groups, typeof(*iter), list); \
471 (iter = next_task_group(iter)) && \
472 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
474 #define for_each_sched_rt_entity(rt_se) \
475 for (; rt_se; rt_se = rt_se->parent)
477 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
479 return rt_se->my_q;
482 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
483 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
485 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
487 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
488 struct rq *rq = rq_of_rt_rq(rt_rq);
489 struct sched_rt_entity *rt_se;
491 int cpu = cpu_of(rq);
493 rt_se = rt_rq->tg->rt_se[cpu];
495 if (rt_rq->rt_nr_running) {
496 if (!rt_se)
497 enqueue_top_rt_rq(rt_rq);
498 else if (!on_rt_rq(rt_se))
499 enqueue_rt_entity(rt_se, 0);
501 if (rt_rq->highest_prio.curr < curr->prio)
502 resched_curr(rq);
506 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
508 struct sched_rt_entity *rt_se;
509 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
511 rt_se = rt_rq->tg->rt_se[cpu];
513 if (!rt_se)
514 dequeue_top_rt_rq(rt_rq);
515 else if (on_rt_rq(rt_se))
516 dequeue_rt_entity(rt_se, 0);
519 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
521 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
524 static int rt_se_boosted(struct sched_rt_entity *rt_se)
526 struct rt_rq *rt_rq = group_rt_rq(rt_se);
527 struct task_struct *p;
529 if (rt_rq)
530 return !!rt_rq->rt_nr_boosted;
532 p = rt_task_of(rt_se);
533 return p->prio != p->normal_prio;
536 #ifdef CONFIG_SMP
537 static inline const struct cpumask *sched_rt_period_mask(void)
539 return this_rq()->rd->span;
541 #else
542 static inline const struct cpumask *sched_rt_period_mask(void)
544 return cpu_online_mask;
546 #endif
548 static inline
549 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
551 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
554 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
556 return &rt_rq->tg->rt_bandwidth;
559 #else /* !CONFIG_RT_GROUP_SCHED */
561 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
563 return rt_rq->rt_runtime;
566 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
568 return ktime_to_ns(def_rt_bandwidth.rt_period);
571 typedef struct rt_rq *rt_rq_iter_t;
573 #define for_each_rt_rq(rt_rq, iter, rq) \
574 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
576 #define for_each_sched_rt_entity(rt_se) \
577 for (; rt_se; rt_se = NULL)
579 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
581 return NULL;
584 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
586 struct rq *rq = rq_of_rt_rq(rt_rq);
588 if (!rt_rq->rt_nr_running)
589 return;
591 enqueue_top_rt_rq(rt_rq);
592 resched_curr(rq);
595 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
597 dequeue_top_rt_rq(rt_rq);
600 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
602 return rt_rq->rt_throttled;
605 static inline const struct cpumask *sched_rt_period_mask(void)
607 return cpu_online_mask;
610 static inline
611 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
613 return &cpu_rq(cpu)->rt;
616 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
618 return &def_rt_bandwidth;
621 #endif /* CONFIG_RT_GROUP_SCHED */
623 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
625 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
627 return (hrtimer_active(&rt_b->rt_period_timer) ||
628 rt_rq->rt_time < rt_b->rt_runtime);
631 #ifdef CONFIG_SMP
633 * We ran out of runtime, see if we can borrow some from our neighbours.
635 static void do_balance_runtime(struct rt_rq *rt_rq)
637 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
638 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
639 int i, weight;
640 u64 rt_period;
642 weight = cpumask_weight(rd->span);
644 raw_spin_lock(&rt_b->rt_runtime_lock);
645 rt_period = ktime_to_ns(rt_b->rt_period);
646 for_each_cpu(i, rd->span) {
647 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
648 s64 diff;
650 if (iter == rt_rq)
651 continue;
653 raw_spin_lock(&iter->rt_runtime_lock);
655 * Either all rqs have inf runtime and there's nothing to steal
656 * or __disable_runtime() below sets a specific rq to inf to
657 * indicate it's been disabled and disallow stealing.
659 if (iter->rt_runtime == RUNTIME_INF)
660 goto next;
663 * From runqueues with spare time, take 1/n part of their
664 * spare time, but no more than our period.
666 diff = iter->rt_runtime - iter->rt_time;
667 if (diff > 0) {
668 diff = div_u64((u64)diff, weight);
669 if (rt_rq->rt_runtime + diff > rt_period)
670 diff = rt_period - rt_rq->rt_runtime;
671 iter->rt_runtime -= diff;
672 rt_rq->rt_runtime += diff;
673 if (rt_rq->rt_runtime == rt_period) {
674 raw_spin_unlock(&iter->rt_runtime_lock);
675 break;
678 next:
679 raw_spin_unlock(&iter->rt_runtime_lock);
681 raw_spin_unlock(&rt_b->rt_runtime_lock);
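/*
 * A worked example of the 1/n borrowing above, with assumed numbers: on a
 * root domain of weight 4 and a 1000 ms period, a neighbour holding
 * rt_runtime = 950 ms with rt_time = 150 ms has 800 ms of spare time; we
 * take 800/4 = 200 ms of it, clamped so that our own rt_runtime never
 * exceeds the period, and the scan stops early once we hold a full
 * period's worth of runtime.
 */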
685 * Ensure this RQ takes back all the runtime it lent to its neighbours.
687 static void __disable_runtime(struct rq *rq)
689 struct root_domain *rd = rq->rd;
690 rt_rq_iter_t iter;
691 struct rt_rq *rt_rq;
693 if (unlikely(!scheduler_running))
694 return;
696 for_each_rt_rq(rt_rq, iter, rq) {
697 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
698 s64 want;
699 int i;
701 raw_spin_lock(&rt_b->rt_runtime_lock);
702 raw_spin_lock(&rt_rq->rt_runtime_lock);
704 * Either we're all inf and nobody needs to borrow, or we're
705 * already disabled and thus have nothing to do, or we have
706 * exactly the right amount of runtime to take out.
708 if (rt_rq->rt_runtime == RUNTIME_INF ||
709 rt_rq->rt_runtime == rt_b->rt_runtime)
710 goto balanced;
711 raw_spin_unlock(&rt_rq->rt_runtime_lock);
714 * Calculate the difference between what we started out with
715 * and what we currently have; that's the amount of runtime
716 * we lent and now have to reclaim.
718 want = rt_b->rt_runtime - rt_rq->rt_runtime;
721 * Greedy reclaim, take back as much as we can.
723 for_each_cpu(i, rd->span) {
724 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
725 s64 diff;
728 * Can't reclaim from ourselves or disabled runqueues.
730 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
731 continue;
733 raw_spin_lock(&iter->rt_runtime_lock);
734 if (want > 0) {
735 diff = min_t(s64, iter->rt_runtime, want);
736 iter->rt_runtime -= diff;
737 want -= diff;
738 } else {
739 iter->rt_runtime -= want;
740 want -= want;
742 raw_spin_unlock(&iter->rt_runtime_lock);
744 if (!want)
745 break;
748 raw_spin_lock(&rt_rq->rt_runtime_lock);
750 * We cannot be left wanting - that would mean some runtime
751 * leaked out of the system.
753 BUG_ON(want);
754 balanced:
756 * Disable all the borrow logic by pretending we have inf
757 * runtime - in which case borrowing doesn't make sense.
759 rt_rq->rt_runtime = RUNTIME_INF;
760 rt_rq->rt_throttled = 0;
761 raw_spin_unlock(&rt_rq->rt_runtime_lock);
762 raw_spin_unlock(&rt_b->rt_runtime_lock);
764 /* Make rt_rq available for pick_next_task() */
765 sched_rt_rq_enqueue(rt_rq);
769 static void __enable_runtime(struct rq *rq)
771 rt_rq_iter_t iter;
772 struct rt_rq *rt_rq;
774 if (unlikely(!scheduler_running))
775 return;
778 * Reset each runqueue's bandwidth settings
780 for_each_rt_rq(rt_rq, iter, rq) {
781 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
783 raw_spin_lock(&rt_b->rt_runtime_lock);
784 raw_spin_lock(&rt_rq->rt_runtime_lock);
785 rt_rq->rt_runtime = rt_b->rt_runtime;
786 rt_rq->rt_time = 0;
787 rt_rq->rt_throttled = 0;
788 raw_spin_unlock(&rt_rq->rt_runtime_lock);
789 raw_spin_unlock(&rt_b->rt_runtime_lock);
793 static void balance_runtime(struct rt_rq *rt_rq)
795 if (!sched_feat(RT_RUNTIME_SHARE))
796 return;
798 if (rt_rq->rt_time > rt_rq->rt_runtime) {
799 raw_spin_unlock(&rt_rq->rt_runtime_lock);
800 do_balance_runtime(rt_rq);
801 raw_spin_lock(&rt_rq->rt_runtime_lock);
804 #else /* !CONFIG_SMP */
805 static inline void balance_runtime(struct rt_rq *rt_rq) {}
806 #endif /* CONFIG_SMP */
808 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
810 int i, idle = 1, throttled = 0;
811 const struct cpumask *span;
813 span = sched_rt_period_mask();
814 #ifdef CONFIG_RT_GROUP_SCHED
816 * FIXME: isolated CPUs should really leave the root task group,
817 * whether they are isolcpus or were isolated via cpusets, lest
818 * the timer run on a CPU which does not service all runqueues,
819 * potentially leaving other CPUs indefinitely throttled. If
820 * isolation is really required, the user will turn the throttle
821 * off to kill the perturbations it causes anyway. Meanwhile,
822 * this maintains functionality for boot and/or troubleshooting.
824 if (rt_b == &root_task_group.rt_bandwidth)
825 span = cpu_online_mask;
826 #endif
827 for_each_cpu(i, span) {
828 int enqueue = 0;
829 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
830 struct rq *rq = rq_of_rt_rq(rt_rq);
832 raw_spin_lock(&rq->lock);
833 if (rt_rq->rt_time) {
834 u64 runtime;
836 raw_spin_lock(&rt_rq->rt_runtime_lock);
837 if (rt_rq->rt_throttled)
838 balance_runtime(rt_rq);
839 runtime = rt_rq->rt_runtime;
840 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
841 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
842 rt_rq->rt_throttled = 0;
843 enqueue = 1;
846 * When we're idle and a woken (rt) task is
847 * throttled, check_preempt_curr() will set
848 * skip_update and the time between the wakeup
849 * and this unthrottle will get accounted as
850 * 'runtime'.
852 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
853 rq_clock_skip_update(rq, false);
855 if (rt_rq->rt_time || rt_rq->rt_nr_running)
856 idle = 0;
857 raw_spin_unlock(&rt_rq->rt_runtime_lock);
858 } else if (rt_rq->rt_nr_running) {
859 idle = 0;
860 if (!rt_rq_throttled(rt_rq))
861 enqueue = 1;
863 if (rt_rq->rt_throttled)
864 throttled = 1;
866 if (enqueue)
867 sched_rt_rq_enqueue(rt_rq);
868 raw_spin_unlock(&rq->lock);
871 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
872 return 1;
874 return idle;
877 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
879 #ifdef CONFIG_RT_GROUP_SCHED
880 struct rt_rq *rt_rq = group_rt_rq(rt_se);
882 if (rt_rq)
883 return rt_rq->highest_prio.curr;
884 #endif
886 return rt_task_of(rt_se)->prio;
889 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
891 u64 runtime = sched_rt_runtime(rt_rq);
893 if (rt_rq->rt_throttled)
894 return rt_rq_throttled(rt_rq);
896 if (runtime >= sched_rt_period(rt_rq))
897 return 0;
899 balance_runtime(rt_rq);
900 runtime = sched_rt_runtime(rt_rq);
901 if (runtime == RUNTIME_INF)
902 return 0;
904 if (rt_rq->rt_time > runtime) {
905 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
908 * Don't actually throttle groups that have no runtime assigned
909 * but accrue some time due to boosting.
911 if (likely(rt_b->rt_runtime)) {
912 rt_rq->rt_throttled = 1;
913 printk_deferred_once("sched: RT throttling activated\n");
914 } else {
916 * In case we did anyway, make it go away,
917 * replenishment is a joke, since it will replenish us
918 * with exactly 0 ns.
920 rt_rq->rt_time = 0;
923 if (rt_rq_throttled(rt_rq)) {
924 sched_rt_rq_dequeue(rt_rq);
925 return 1;
929 return 0;
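/*
 * A worked example of the check above, assuming the default 950 ms of
 * runtime per 1000 ms period: a CPU-bound SCHED_FIFO task keeps adding its
 * delta_exec to rt_rq->rt_time via update_curr_rt(); once rt_time exceeds
 * the runtime the queue is marked throttled and dequeued, and the deferred,
 * once-only "sched: RT throttling activated" message is printed. The
 * period timer later refills the budget (rt_time is reduced by a period's
 * worth of runtime in do_sched_rt_period_timer()) and re-enqueues the
 * queue.
 */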
933 * Update the current task's runtime statistics. Skip current tasks that
934 * are not in our scheduling class.
936 static void update_curr_rt(struct rq *rq)
938 struct task_struct *curr = rq->curr;
939 struct sched_rt_entity *rt_se = &curr->rt;
940 u64 delta_exec;
942 if (curr->sched_class != &rt_sched_class)
943 return;
945 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
946 if (unlikely((s64)delta_exec <= 0))
947 return;
949 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
950 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
952 schedstat_set(curr->se.statistics.exec_max,
953 max(curr->se.statistics.exec_max, delta_exec));
955 curr->se.sum_exec_runtime += delta_exec;
956 account_group_exec_runtime(curr, delta_exec);
958 curr->se.exec_start = rq_clock_task(rq);
959 cpuacct_charge(curr, delta_exec);
961 sched_rt_avg_update(rq, delta_exec);
963 if (!rt_bandwidth_enabled())
964 return;
966 for_each_sched_rt_entity(rt_se) {
967 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
969 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
970 raw_spin_lock(&rt_rq->rt_runtime_lock);
971 rt_rq->rt_time += delta_exec;
972 if (sched_rt_runtime_exceeded(rt_rq))
973 resched_curr(rq);
974 raw_spin_unlock(&rt_rq->rt_runtime_lock);
979 static void
980 dequeue_top_rt_rq(struct rt_rq *rt_rq)
982 struct rq *rq = rq_of_rt_rq(rt_rq);
984 BUG_ON(&rq->rt != rt_rq);
986 if (!rt_rq->rt_queued)
987 return;
989 BUG_ON(!rq->nr_running);
991 sub_nr_running(rq, rt_rq->rt_nr_running);
992 rt_rq->rt_queued = 0;
995 static void
996 enqueue_top_rt_rq(struct rt_rq *rt_rq)
998 struct rq *rq = rq_of_rt_rq(rt_rq);
1000 BUG_ON(&rq->rt != rt_rq);
1002 if (rt_rq->rt_queued)
1003 return;
1004 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1005 return;
1007 add_nr_running(rq, rt_rq->rt_nr_running);
1008 rt_rq->rt_queued = 1;
1011 #if defined CONFIG_SMP
1013 static void
1014 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1016 struct rq *rq = rq_of_rt_rq(rt_rq);
1018 #ifdef CONFIG_RT_GROUP_SCHED
1020 * Change rq's cpupri only if rt_rq is the top queue.
1022 if (&rq->rt != rt_rq)
1023 return;
1024 #endif
1025 if (rq->online && prio < prev_prio)
1026 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1029 static void
1030 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1032 struct rq *rq = rq_of_rt_rq(rt_rq);
1034 #ifdef CONFIG_RT_GROUP_SCHED
1036 * Change rq's cpupri only if rt_rq is the top queue.
1038 if (&rq->rt != rt_rq)
1039 return;
1040 #endif
1041 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1042 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1045 #else /* CONFIG_SMP */
1047 static inline
1048 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1049 static inline
1050 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1052 #endif /* CONFIG_SMP */
1054 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1055 static void
1056 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1058 int prev_prio = rt_rq->highest_prio.curr;
1060 if (prio < prev_prio)
1061 rt_rq->highest_prio.curr = prio;
1063 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1066 static void
1067 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1069 int prev_prio = rt_rq->highest_prio.curr;
1071 if (rt_rq->rt_nr_running) {
1073 WARN_ON(prio < prev_prio);
1076 * This may have been our highest task, and therefore
1077 * we may have some recomputation to do
1079 if (prio == prev_prio) {
1080 struct rt_prio_array *array = &rt_rq->active;
1082 rt_rq->highest_prio.curr =
1083 sched_find_first_bit(array->bitmap);
1086 } else
1087 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1089 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1092 #else
1094 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1095 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1097 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1099 #ifdef CONFIG_RT_GROUP_SCHED
1101 static void
1102 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1104 if (rt_se_boosted(rt_se))
1105 rt_rq->rt_nr_boosted++;
1107 if (rt_rq->tg)
1108 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1111 static void
1112 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1114 if (rt_se_boosted(rt_se))
1115 rt_rq->rt_nr_boosted--;
1117 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1120 #else /* CONFIG_RT_GROUP_SCHED */
1122 static void
1123 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1125 start_rt_bandwidth(&def_rt_bandwidth);
1128 static inline
1129 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1131 #endif /* CONFIG_RT_GROUP_SCHED */
1133 static inline
1134 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1136 struct rt_rq *group_rq = group_rt_rq(rt_se);
1138 if (group_rq)
1139 return group_rq->rt_nr_running;
1140 else
1141 return 1;
1144 static inline
1145 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1147 struct rt_rq *group_rq = group_rt_rq(rt_se);
1148 struct task_struct *tsk;
1150 if (group_rq)
1151 return group_rq->rr_nr_running;
1153 tsk = rt_task_of(rt_se);
1155 return (tsk->policy == SCHED_RR) ? 1 : 0;
1158 static inline
1159 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1161 int prio = rt_se_prio(rt_se);
1163 WARN_ON(!rt_prio(prio));
1164 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1165 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1167 inc_rt_prio(rt_rq, prio);
1168 inc_rt_migration(rt_se, rt_rq);
1169 inc_rt_group(rt_se, rt_rq);
1172 static inline
1173 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1175 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1176 WARN_ON(!rt_rq->rt_nr_running);
1177 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1178 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1180 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1181 dec_rt_migration(rt_se, rt_rq);
1182 dec_rt_group(rt_se, rt_rq);
1186 * Change rt_se->run_list location unless SAVE && !MOVE
1188 * assumes ENQUEUE/DEQUEUE flags match
1190 static inline bool move_entity(unsigned int flags)
1192 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1193 return false;
1195 return true;
1198 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1200 list_del_init(&rt_se->run_list);
1202 if (list_empty(array->queue + rt_se_prio(rt_se)))
1203 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1205 rt_se->on_list = 0;
1208 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1210 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1211 struct rt_prio_array *array = &rt_rq->active;
1212 struct rt_rq *group_rq = group_rt_rq(rt_se);
1213 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1216 * Don't enqueue the group if it's throttled, or when empty.
1217 * The latter is a consequence of the former when a child group
1218 * gets throttled and the current group doesn't have any other
1219 * active members.
1221 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1222 if (rt_se->on_list)
1223 __delist_rt_entity(rt_se, array);
1224 return;
1227 if (move_entity(flags)) {
1228 WARN_ON_ONCE(rt_se->on_list);
1229 if (flags & ENQUEUE_HEAD)
1230 list_add(&rt_se->run_list, queue);
1231 else
1232 list_add_tail(&rt_se->run_list, queue);
1234 __set_bit(rt_se_prio(rt_se), array->bitmap);
1235 rt_se->on_list = 1;
1237 rt_se->on_rq = 1;
1239 inc_rt_tasks(rt_se, rt_rq);
1242 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1244 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1245 struct rt_prio_array *array = &rt_rq->active;
1247 if (move_entity(flags)) {
1248 WARN_ON_ONCE(!rt_se->on_list);
1249 __delist_rt_entity(rt_se, array);
1251 rt_se->on_rq = 0;
1253 dec_rt_tasks(rt_se, rt_rq);
1257 * Because the prio of an upper entry depends on the lower
1258 * entries, we must remove entries top-down.
1260 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1262 struct sched_rt_entity *back = NULL;
1264 for_each_sched_rt_entity(rt_se) {
1265 rt_se->back = back;
1266 back = rt_se;
1269 dequeue_top_rt_rq(rt_rq_of_se(back));
1271 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1272 if (on_rt_rq(rt_se))
1273 __dequeue_rt_entity(rt_se, flags);
1277 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1279 struct rq *rq = rq_of_rt_se(rt_se);
1281 dequeue_rt_stack(rt_se, flags);
1282 for_each_sched_rt_entity(rt_se)
1283 __enqueue_rt_entity(rt_se, flags);
1284 enqueue_top_rt_rq(&rq->rt);
1287 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1289 struct rq *rq = rq_of_rt_se(rt_se);
1291 dequeue_rt_stack(rt_se, flags);
1293 for_each_sched_rt_entity(rt_se) {
1294 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1296 if (rt_rq && rt_rq->rt_nr_running)
1297 __enqueue_rt_entity(rt_se, flags);
1299 enqueue_top_rt_rq(&rq->rt);
1303 * Adding/removing a task to/from a priority array:
1305 static void
1306 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1308 struct sched_rt_entity *rt_se = &p->rt;
1310 if (flags & ENQUEUE_WAKEUP)
1311 rt_se->timeout = 0;
1313 enqueue_rt_entity(rt_se, flags);
1315 if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
1316 enqueue_pushable_task(rq, p);
1319 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1321 struct sched_rt_entity *rt_se = &p->rt;
1323 update_curr_rt(rq);
1324 dequeue_rt_entity(rt_se, flags);
1326 dequeue_pushable_task(rq, p);
1330 * Put task at the head or the end of the run list without the overhead of
1331 * dequeue followed by enqueue.
1333 static void
1334 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1336 if (on_rt_rq(rt_se)) {
1337 struct rt_prio_array *array = &rt_rq->active;
1338 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1340 if (head)
1341 list_move(&rt_se->run_list, queue);
1342 else
1343 list_move_tail(&rt_se->run_list, queue);
1347 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1349 struct sched_rt_entity *rt_se = &p->rt;
1350 struct rt_rq *rt_rq;
1352 for_each_sched_rt_entity(rt_se) {
1353 rt_rq = rt_rq_of_se(rt_se);
1354 requeue_rt_entity(rt_rq, rt_se, head);
1358 static void yield_task_rt(struct rq *rq)
1360 requeue_task_rt(rq, rq->curr, 0);
1363 #ifdef CONFIG_SMP
1364 static int find_lowest_rq(struct task_struct *task);
1366 static int
1367 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1369 struct task_struct *curr;
1370 struct rq *rq;
1372 /* For anything but wake ups, just return the task_cpu */
1373 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1374 goto out;
1376 rq = cpu_rq(cpu);
1378 rcu_read_lock();
1379 curr = READ_ONCE(rq->curr); /* unlocked access */
1382 * If the current task on @p's runqueue is an RT task, then
1383 * try to see if we can wake this RT task up on another
1384 * runqueue. Otherwise simply start this RT task
1385 * on its current runqueue.
1387 * We want to avoid overloading runqueues. If the woken
1388 * task is a higher priority, then it will stay on this CPU
1389 * and the lower prio task should be moved to another CPU.
1390 * Even though this will probably make the lower prio task
1391 * lose its cache, we do not want to bounce a higher-priority task
1392 * around just because it gave up its CPU, perhaps for a
1393 * lock?
1395 * For equal prio tasks, we just let the scheduler sort it out.
1397 * Otherwise, just let it ride on the affined RQ and the
1398 * post-schedule router will push the preempted task away
1400 * This test is optimistic, if we get it wrong the load-balancer
1401 * will have to sort it out.
1403 if (curr && unlikely(rt_task(curr)) &&
1404 (tsk_nr_cpus_allowed(curr) < 2 ||
1405 curr->prio <= p->prio)) {
1406 int target = find_lowest_rq(p);
1409 * Don't bother moving it if the destination CPU is
1410 * not running a lower priority task.
1412 if (target != -1 &&
1413 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1414 cpu = target;
1416 rcu_read_unlock();
1418 out:
1419 return cpu;
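/*
 * A short scenario for the wakeup path above (lower ->prio means more
 * important): a prio-50 RT task wakes on a CPU whose current task is a
 * prio-10 RT task with more than one allowed CPU. Since curr->prio (10) <=
 * p->prio (50), find_lowest_rq() is consulted and the woken task is
 * redirected to a CPU whose top priority is numerically above 50, if one
 * exists. Had the woken task been prio-5 instead, the condition would
 * fail, and it would stay put and simply preempt.
 */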
1422 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1425 * Current can't be migrated, useless to reschedule,
1426 * let's hope p can move out.
1428 if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1429 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1430 return;
1433 * p is migratable, so let's not schedule it and
1434 * see if it is pushed or pulled somewhere else.
1436 if (tsk_nr_cpus_allowed(p) != 1
1437 && cpupri_find(&rq->rd->cpupri, p, NULL))
1438 return;
1441 * There appear to be other CPUs that can accept
1442 * current and none to run 'p', so let's reschedule
1443 * to try and push current away:
1445 requeue_task_rt(rq, p, 1);
1446 resched_curr(rq);
1449 #endif /* CONFIG_SMP */
1452 * Preempt the current task with a newly woken task if needed:
1454 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1456 if (p->prio < rq->curr->prio) {
1457 resched_curr(rq);
1458 return;
1461 #ifdef CONFIG_SMP
1463 * If:
1465 * - the newly woken task is of equal priority to the current task
1466 * - the newly woken task is non-migratable while current is migratable
1467 * - current will be preempted on the next reschedule
1469 * we should check to see if current can readily move to a different
1470 * cpu. If so, we will reschedule to allow the push logic to try
1471 * to move current somewhere else, making room for our non-migratable
1472 * task.
1474 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1475 check_preempt_equal_prio(rq, p);
1476 #endif
1479 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1480 struct rt_rq *rt_rq)
1482 struct rt_prio_array *array = &rt_rq->active;
1483 struct sched_rt_entity *next = NULL;
1484 struct list_head *queue;
1485 int idx;
1487 idx = sched_find_first_bit(array->bitmap);
1488 BUG_ON(idx >= MAX_RT_PRIO);
1490 queue = array->queue + idx;
1491 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1493 return next;
1496 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1498 struct sched_rt_entity *rt_se;
1499 struct task_struct *p;
1500 struct rt_rq *rt_rq = &rq->rt;
1502 do {
1503 rt_se = pick_next_rt_entity(rq, rt_rq);
1504 BUG_ON(!rt_se);
1505 rt_rq = group_rt_rq(rt_se);
1506 } while (rt_rq);
1508 p = rt_task_of(rt_se);
1509 p->se.exec_start = rq_clock_task(rq);
1511 return p;
1514 static struct task_struct *
1515 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1517 struct task_struct *p;
1518 struct rt_rq *rt_rq = &rq->rt;
1520 if (need_pull_rt_task(rq, prev)) {
1522 * This is OK, because current is on_cpu, which avoids it being
1523 * picked for load-balance and preemption/IRQs are still
1524 * disabled avoiding further scheduler activity on it and we're
1525 * being very careful to re-start the picking loop.
1527 lockdep_unpin_lock(&rq->lock, cookie);
1528 pull_rt_task(rq);
1529 lockdep_repin_lock(&rq->lock, cookie);
1531 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1532 * means a dl or stop task can slip in, in which case we need
1533 * to re-start task selection.
1535 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1536 rq->dl.dl_nr_running))
1537 return RETRY_TASK;
1541 * We may dequeue prev's rt_rq in put_prev_task().
1542 * So, we update time before rt_nr_running check.
1544 if (prev->sched_class == &rt_sched_class)
1545 update_curr_rt(rq);
1547 if (!rt_rq->rt_queued)
1548 return NULL;
1550 put_prev_task(rq, prev);
1552 p = _pick_next_task_rt(rq);
1554 /* The running task is never eligible for pushing */
1555 dequeue_pushable_task(rq, p);
1557 queue_push_tasks(rq);
1559 return p;
1562 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1564 update_curr_rt(rq);
1567 * The previous task needs to be made eligible for pushing
1568 * if it is still active
1570 if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
1571 enqueue_pushable_task(rq, p);
1574 #ifdef CONFIG_SMP
1576 /* Only try algorithms three times */
1577 #define RT_MAX_TRIES 3
1579 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1581 if (!task_running(rq, p) &&
1582 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1583 return 1;
1584 return 0;
1588 * Return the highest-priority pushable task on this rq that is suitable to run
1589 * on @cpu, or NULL if there is none.
1591 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1593 struct plist_head *head = &rq->rt.pushable_tasks;
1594 struct task_struct *p;
1596 if (!has_pushable_tasks(rq))
1597 return NULL;
1599 plist_for_each_entry(p, head, pushable_tasks) {
1600 if (pick_rt_task(rq, p, cpu))
1601 return p;
1604 return NULL;
1607 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1609 static int find_lowest_rq(struct task_struct *task)
1611 struct sched_domain *sd;
1612 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1613 int this_cpu = smp_processor_id();
1614 int cpu = task_cpu(task);
1616 /* Make sure the mask is initialized first */
1617 if (unlikely(!lowest_mask))
1618 return -1;
1620 if (tsk_nr_cpus_allowed(task) == 1)
1621 return -1; /* No other targets possible */
1623 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1624 return -1; /* No targets found */
1627 * At this point we have built a mask of cpus representing the
1628 * lowest priority tasks in the system. Now we want to elect
1629 * the best one based on our affinity and topology.
1631 * We prioritize the last cpu that the task executed on since
1632 * it is most likely cache-hot in that location.
1634 if (cpumask_test_cpu(cpu, lowest_mask))
1635 return cpu;
1638 * Otherwise, we consult the sched_domains span maps to figure
1639 * out which cpu is logically closest to our hot cache data.
1641 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1642 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1644 rcu_read_lock();
1645 for_each_domain(cpu, sd) {
1646 if (sd->flags & SD_WAKE_AFFINE) {
1647 int best_cpu;
1650 * "this_cpu" is cheaper to preempt than a
1651 * remote processor.
1653 if (this_cpu != -1 &&
1654 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1655 rcu_read_unlock();
1656 return this_cpu;
1659 best_cpu = cpumask_first_and(lowest_mask,
1660 sched_domain_span(sd));
1661 if (best_cpu < nr_cpu_ids) {
1662 rcu_read_unlock();
1663 return best_cpu;
1667 rcu_read_unlock();
1670 * And finally, if there were no matches within the domains
1671 * just give the caller *something* to work with from the compatible
1672 * locations.
1674 if (this_cpu != -1)
1675 return this_cpu;
1677 cpu = cpumask_any(lowest_mask);
1678 if (cpu < nr_cpu_ids)
1679 return cpu;
1680 return -1;
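/*
 * To make the search above concrete (a behavioural sketch; the cpupri
 * details live in cpupri.c): for, say, a prio-10 task, cpupri_find() fills
 * lowest_mask with every CPU whose top runnable priority is less important
 * than 10, including CPUs running only CFS or idle. The preference order
 * applied here is then: the task's previous CPU if it is in the mask,
 * otherwise a mask CPU inside an SD_WAKE_AFFINE domain shared with
 * this_cpu (preferring this_cpu itself), and finally any CPU in the mask.
 */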
1683 /* Will lock the rq it finds */
1684 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1686 struct rq *lowest_rq = NULL;
1687 int tries;
1688 int cpu;
1690 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1691 cpu = find_lowest_rq(task);
1693 if ((cpu == -1) || (cpu == rq->cpu))
1694 break;
1696 lowest_rq = cpu_rq(cpu);
1698 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1700 * Target rq has tasks of equal or higher priority,
1701 * retrying does not release any lock and is unlikely
1702 * to yield a different result.
1704 lowest_rq = NULL;
1705 break;
1708 /* if the prio of this runqueue changed, try again */
1709 if (double_lock_balance(rq, lowest_rq)) {
1711 * We had to unlock the run queue. In
1712 * the meantime, the task could have
1713 * migrated already or had its affinity changed.
1714 * Also make sure that it wasn't scheduled on its rq.
1716 if (unlikely(task_rq(task) != rq ||
1717 !cpumask_test_cpu(lowest_rq->cpu,
1718 tsk_cpus_allowed(task)) ||
1719 task_running(rq, task) ||
1720 !rt_task(task) ||
1721 !task_on_rq_queued(task))) {
1723 double_unlock_balance(rq, lowest_rq);
1724 lowest_rq = NULL;
1725 break;
1729 /* If this rq is still suitable use it. */
1730 if (lowest_rq->rt.highest_prio.curr > task->prio)
1731 break;
1733 /* try again */
1734 double_unlock_balance(rq, lowest_rq);
1735 lowest_rq = NULL;
1738 return lowest_rq;
1741 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1743 struct task_struct *p;
1745 if (!has_pushable_tasks(rq))
1746 return NULL;
1748 p = plist_first_entry(&rq->rt.pushable_tasks,
1749 struct task_struct, pushable_tasks);
1751 BUG_ON(rq->cpu != task_cpu(p));
1752 BUG_ON(task_current(rq, p));
1753 BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1755 BUG_ON(!task_on_rq_queued(p));
1756 BUG_ON(!rt_task(p));
1758 return p;
1762 * If the current CPU has more than one RT task, see if the non
1763 * running task can migrate over to a CPU that is running a task
1764 * of lesser priority.
1766 static int push_rt_task(struct rq *rq)
1768 struct task_struct *next_task;
1769 struct rq *lowest_rq;
1770 int ret = 0;
1772 if (!rq->rt.overloaded)
1773 return 0;
1775 next_task = pick_next_pushable_task(rq);
1776 if (!next_task)
1777 return 0;
1779 retry:
1780 if (unlikely(next_task == rq->curr)) {
1781 WARN_ON(1);
1782 return 0;
1786 * It's possible that the next_task slipped in with a
1787 * higher priority than current. If that's the case,
1788 * just reschedule current.
1790 if (unlikely(next_task->prio < rq->curr->prio)) {
1791 resched_curr(rq);
1792 return 0;
1795 /* We might release rq lock */
1796 get_task_struct(next_task);
1798 /* find_lock_lowest_rq locks the rq if found */
1799 lowest_rq = find_lock_lowest_rq(next_task, rq);
1800 if (!lowest_rq) {
1801 struct task_struct *task;
1803 * find_lock_lowest_rq releases rq->lock
1804 * so it is possible that next_task has migrated.
1806 * We need to make sure that the task is still on the same
1807 * run-queue and is also still the next task eligible for
1808 * pushing.
1810 task = pick_next_pushable_task(rq);
1811 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1813 * The task hasn't migrated, and is still the next
1814 * eligible task, but we failed to find a run-queue
1815 * to push it to. Do not retry in this case, since
1816 * other cpus will pull from us when ready.
1818 goto out;
1821 if (!task)
1822 /* No more tasks, just exit */
1823 goto out;
1826 * Something has shifted, try again.
1828 put_task_struct(next_task);
1829 next_task = task;
1830 goto retry;
1833 deactivate_task(rq, next_task, 0);
1834 set_task_cpu(next_task, lowest_rq->cpu);
1835 activate_task(lowest_rq, next_task, 0);
1836 ret = 1;
1838 resched_curr(lowest_rq);
1840 double_unlock_balance(rq, lowest_rq);
1842 out:
1843 put_task_struct(next_task);
1845 return ret;
1848 static void push_rt_tasks(struct rq *rq)
1850 /* push_rt_task will return true if it moved an RT task */
1851 while (push_rt_task(rq))
1855 #ifdef HAVE_RT_PUSH_IPI
1858 * When a high priority task schedules out from a CPU and a lower priority
1859 * task is scheduled in, a check is made to see if there are any RT tasks
1860 * on other CPUs that are waiting to run because a higher priority RT task
1861 * is currently running on its CPU. In this case, the CPU with multiple RT
1862 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1863 * up that may be able to run one of its non-running queued RT tasks.
1865 * All CPUs with overloaded RT tasks need to be notified as there is currently
1866 * no way to know which of these CPUs have the highest priority task waiting
1867 * to run. Instead of trying to take a spinlock on each of these CPUs,
1868 * which has been shown to cause large latency when done on machines with many
1869 * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
1870 * RT tasks waiting to run.
1872 * Just sending an IPI to each of the CPUs is also an issue, as on large
1873 * count CPU machines, this can cause an IPI storm on a CPU, especially
1874 * if it's the only CPU with multiple RT tasks queued, and a large number
1875 * of CPUs scheduling a lower priority task at the same time.
1877 * Each root domain has its own irq work function that can iterate over
1878 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1879 * tasks must be checked when one or many CPUs are lowering
1880 * their priority, there's a single irq work iterator that will try to
1881 * push off RT tasks that are waiting to run.
1883 * When a CPU schedules a lower priority task, it will kick off the
1884 * irq work iterator that will jump to each CPU with overloaded RT tasks.
1885 * As it only takes the first CPU that schedules a lower priority task
1886 * to start the process, the rto_start variable is incremented and if
1887 * the atomic result is one, then that CPU will try to take the rto_lock.
1888 * This prevents high contention on the lock as the process handles all
1889 * CPUs scheduling lower priority tasks.
1891 * All CPUs that are scheduling a lower priority task will increment the
1892 * rt_loop_next variable. This will make sure that the irq work iterator
1893 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1894 * priority task, even if the iterator is in the middle of a scan. Incrementing
1895 * the rt_loop_next will cause the iterator to perform another scan.
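 *
 * A concrete walk-through with assumed CPU numbers: CPUs 1 and 3 are RT
 * overloaded. CPU 0 schedules a lower priority task, bumps rto_loop_next,
 * wins rto_loop_start and queues the irq work on CPU 1. CPU 1 pushes what
 * it can and then forwards the irq work to CPU 3. If CPU 2 also drops its
 * priority while this is in flight, it finds rto_cpu already valid and
 * sends nothing; its bump of rto_loop_next simply makes the iterator do
 * one more full sweep once it runs out of CPUs, rather than starting a
 * second IPI chain.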
1898 static int rto_next_cpu(struct root_domain *rd)
1900 int next;
1901 int cpu;
1904 * When starting the IPI RT pushing, the rto_cpu is set to -1,
1905 * rto_next_cpu() will simply return the first CPU found in
1906 * the rto_mask.
1908 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
1909 * will return the next CPU found in the rto_mask.
1911 * If there are no more CPUs left in the rto_mask, then a check is made
1912 * against rto_loop and rto_loop_next. rto_loop is only updated with
1913 * the rto_lock held, but any CPU may increment the rto_loop_next
1914 * without any locking.
1916 for (;;) {
1918 /* When rto_cpu is -1 this acts like cpumask_first() */
1919 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1921 rd->rto_cpu = cpu;
1923 if (cpu < nr_cpu_ids)
1924 return cpu;
1926 rd->rto_cpu = -1;
1929 * ACQUIRE ensures we see the @rto_mask changes
1930 * made prior to the @next value observed.
1932 * Matches WMB in rt_set_overload().
1934 next = atomic_read_acquire(&rd->rto_loop_next);
1936 if (rd->rto_loop == next)
1937 break;
1939 rd->rto_loop = next;
1942 return -1;
1945 static inline bool rto_start_trylock(atomic_t *v)
1947 return !atomic_cmpxchg_acquire(v, 0, 1);
1950 static inline void rto_start_unlock(atomic_t *v)
1952 atomic_set_release(v, 0);
1955 static void tell_cpu_to_push(struct rq *rq)
1957 int cpu = -1;
1959 /* Keep the loop going if the IPI is currently active */
1960 atomic_inc(&rq->rd->rto_loop_next);
1962 /* Only one CPU can initiate a loop at a time */
1963 if (!rto_start_trylock(&rq->rd->rto_loop_start))
1964 return;
1966 raw_spin_lock(&rq->rd->rto_lock);
1969 * The rto_cpu is updated under the lock, if it has a valid cpu
1970 * then the IPI is still running and will continue due to the
1971 * update to loop_next, and nothing needs to be done here.
1972 * Otherwise it is finishing up and an IPI needs to be sent.
1974 if (rq->rd->rto_cpu < 0)
1975 cpu = rto_next_cpu(rq->rd);
1977 raw_spin_unlock(&rq->rd->rto_lock);
1979 rto_start_unlock(&rq->rd->rto_loop_start);
1981 if (cpu >= 0) {
1982 /* Make sure the rd does not get freed while pushing */
1983 sched_get_rd(rq->rd);
1984 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
1988 /* Called from hardirq context */
1989 void rto_push_irq_work_func(struct irq_work *work)
1991 struct root_domain *rd =
1992 container_of(work, struct root_domain, rto_push_work);
1993 struct rq *rq;
1994 int cpu;
1996 rq = this_rq();
1999 * We do not need to grab the lock to check for has_pushable_tasks.
2000 * When it gets updated, a check is made if a push is possible.
2002 if (has_pushable_tasks(rq)) {
2003 raw_spin_lock(&rq->lock);
2004 push_rt_tasks(rq);
2005 raw_spin_unlock(&rq->lock);
2008 raw_spin_lock(&rd->rto_lock);
2010 /* Pass the IPI to the next rt overloaded queue */
2011 cpu = rto_next_cpu(rd);
2013 raw_spin_unlock(&rd->rto_lock);
2015 if (cpu < 0) {
2016 sched_put_rd(rd);
2017 return;
2020 /* Try the next RT overloaded CPU */
2021 irq_work_queue_on(&rd->rto_push_work, cpu);
2023 #endif /* HAVE_RT_PUSH_IPI */
2025 static void pull_rt_task(struct rq *this_rq)
2027 int this_cpu = this_rq->cpu, cpu;
2028 bool resched = false;
2029 struct task_struct *p;
2030 struct rq *src_rq;
2031 int rt_overload_count = rt_overloaded(this_rq);
2033 if (likely(!rt_overload_count))
2034 return;
2037 * Match the barrier from rt_set_overload(); this guarantees that if we
2038 * see overloaded we must also see the rto_mask bit.
2040 smp_rmb();
2042 /* If we are the only overloaded CPU do nothing */
2043 if (rt_overload_count == 1 &&
2044 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2045 return;
2047 #ifdef HAVE_RT_PUSH_IPI
2048 if (sched_feat(RT_PUSH_IPI)) {
2049 tell_cpu_to_push(this_rq);
2050 return;
2052 #endif
2054 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2055 if (this_cpu == cpu)
2056 continue;
2058 src_rq = cpu_rq(cpu);
2061 * Don't bother taking the src_rq->lock if the next highest
2062 * task is known to be lower-priority than our current task.
2063 * This may look racy, but if this value is about to go
2064 * logically higher, the src_rq will push this task away.
2065 * And if it's going logically lower, we do not care.
2067 if (src_rq->rt.highest_prio.next >=
2068 this_rq->rt.highest_prio.curr)
2069 continue;
2072 * We can potentially drop this_rq's lock in
2073 * double_lock_balance, and another CPU could
2074 * alter this_rq
2076 double_lock_balance(this_rq, src_rq);
2079 * We can only pull a task that is pushable
2080 * on its rq, and no others.
2082 p = pick_highest_pushable_task(src_rq, this_cpu);
2085 * Do we have an RT task that preempts
2086 * the to-be-scheduled task?
2088 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2089 WARN_ON(p == src_rq->curr);
2090 WARN_ON(!task_on_rq_queued(p));
2093 * There's a chance that p is higher in priority
2094 * than what's currently running on its cpu.
2095 * This is just that p is waking up and hasn't
2096 * had a chance to schedule. We only pull
2097 * p if it is lower in priority than the
2098 * current task on the run queue
2100 if (p->prio < src_rq->curr->prio)
2101 goto skip;
2103 resched = true;
2105 deactivate_task(src_rq, p, 0);
2106 set_task_cpu(p, this_cpu);
2107 activate_task(this_rq, p, 0);
2109 * We continue with the search, just in
2110 * case there's an even higher prio task
2111 * in another runqueue. (low likelihood
2112 * but possible)
2115 skip:
2116 double_unlock_balance(this_rq, src_rq);
2119 if (resched)
2120 resched_curr(this_rq);
2124 * If we are not running and we are not going to reschedule soon, we should
2125 * try to push tasks away now
2127 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2129 if (!task_running(rq, p) &&
2130 !test_tsk_need_resched(rq->curr) &&
2131 tsk_nr_cpus_allowed(p) > 1 &&
2132 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2133 (tsk_nr_cpus_allowed(rq->curr) < 2 ||
2134 rq->curr->prio <= p->prio))
2135 push_rt_tasks(rq);
2138 /* Assumes rq->lock is held */
2139 static void rq_online_rt(struct rq *rq)
2141 if (rq->rt.overloaded)
2142 rt_set_overload(rq);
2144 __enable_runtime(rq);
2146 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2149 /* Assumes rq->lock is held */
2150 static void rq_offline_rt(struct rq *rq)
2152 if (rq->rt.overloaded)
2153 rt_clear_overload(rq);
2155 __disable_runtime(rq);
2157 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2161 * When switching from the RT queue, we bring ourselves to a position
2162 * that we might want to pull RT tasks from other runqueues.
2164 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2167 * If there are other RT tasks then we will reschedule
2168 * and the scheduling of the other RT tasks will handle
2169 * the balancing. But if we are the last RT task
2170 * we may need to handle the pulling of RT tasks
2171 * now.
2173 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2174 return;
2176 queue_pull_task(rq);
2179 void __init init_sched_rt_class(void)
2181 unsigned int i;
2183 for_each_possible_cpu(i) {
2184 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2185 GFP_KERNEL, cpu_to_node(i));
2188 #endif /* CONFIG_SMP */
2191 * When switching a task to RT, we may overload the runqueue
2192 * with RT tasks. In this case we try to push them off to
2193 * other runqueues.
2195 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2198 * If we are already running, then there's nothing
2199 * that needs to be done. But if we are not running
2200 * we may need to preempt the current running task.
2201 * If that current running task is also an RT task
2202 * then see if we can move to another run queue.
2204 if (task_on_rq_queued(p) && rq->curr != p) {
2205 #ifdef CONFIG_SMP
2206 if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
2207 queue_push_tasks(rq);
2208 #endif /* CONFIG_SMP */
2209 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2210 resched_curr(rq);
2215 * Priority of the task has changed. This may cause
2216 * us to initiate a push or pull.
2218 static void
2219 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2221 if (!task_on_rq_queued(p))
2222 return;
2224 if (rq->curr == p) {
2225 #ifdef CONFIG_SMP
2227 * If our priority decreases while running, we
2228 * may need to pull tasks to this runqueue.
2230 if (oldprio < p->prio)
2231 queue_pull_task(rq);
2234 * If there's a higher priority task waiting to run
2235 * then reschedule.
2237 if (p->prio > rq->rt.highest_prio.curr)
2238 resched_curr(rq);
2239 #else
2240 /* For UP simply resched on drop of prio */
2241 if (oldprio < p->prio)
2242 resched_curr(rq);
2243 #endif /* CONFIG_SMP */
2244 } else {
2246 * This task is not running, but if its
2247 * priority is higher than the current running task's
2248 * then reschedule.
2250 if (p->prio < rq->curr->prio)
2251 resched_curr(rq);
2255 static void watchdog(struct rq *rq, struct task_struct *p)
2257 unsigned long soft, hard;
2259 /* max may change after cur was read; this will be fixed next tick */
2260 soft = task_rlimit(p, RLIMIT_RTTIME);
2261 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2263 if (soft != RLIM_INFINITY) {
2264 unsigned long next;
2266 if (p->rt.watchdog_stamp != jiffies) {
2267 p->rt.timeout++;
2268 p->rt.watchdog_stamp = jiffies;
2271 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2272 if (p->rt.timeout > next)
2273 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
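/*
 * A worked example of the limit arithmetic above, assuming HZ = 1000 and a
 * soft RLIMIT_RTTIME of 2000000 us: USEC_PER_SEC/HZ is 1000, so next =
 * DIV_ROUND_UP(2000000, 1000) = 2000. p->rt.timeout is bumped at most once
 * per jiffy while the task runs, so after roughly two seconds of runtime
 * without a voluntary sleep the cputime expiry fires and, per
 * setrlimit(2), the task receives SIGXCPU.
 */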
2277 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2279 struct sched_rt_entity *rt_se = &p->rt;
2281 update_curr_rt(rq);
2283 watchdog(rq, p);
2286 * RR tasks need a special form of timeslice management.
2287 * FIFO tasks have no timeslices.
2289 if (p->policy != SCHED_RR)
2290 return;
2292 if (--p->rt.time_slice)
2293 return;
2295 p->rt.time_slice = sched_rr_timeslice;
2298 * Requeue to the end of queue if we (and all of our ancestors) are not
2299 * the only element on the queue
2301 for_each_sched_rt_entity(rt_se) {
2302 if (rt_se->run_list.prev != rt_se->run_list.next) {
2303 requeue_task_rt(rq, p, 0);
2304 resched_curr(rq);
2305 return;
2310 static void set_curr_task_rt(struct rq *rq)
2312 struct task_struct *p = rq->curr;
2314 p->se.exec_start = rq_clock_task(rq);
2316 /* The running task is never eligible for pushing */
2317 dequeue_pushable_task(rq, p);
2320 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2323 * Time slice is 0 for SCHED_FIFO tasks
2325 if (task->policy == SCHED_RR)
2326 return sched_rr_timeslice;
2327 else
2328 return 0;
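/*
 * sched_rr_timeslice defaults to RR_TIMESLICE (nominally 100 ms worth of
 * jiffies) and is tunable via /proc/sys/kernel/sched_rr_timeslice_ms. The
 * value returned above is what userspace sees through
 * sched_rr_get_interval(2); a minimal sketch (not part of this file),
 * which per the function above reports the timeslice for a SCHED_RR caller
 * and 0 for SCHED_FIFO:
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) != 0) {	/* 0 == calling thread */
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("round-robin timeslice: %ld.%09ld s\n",
	       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}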
2331 const struct sched_class rt_sched_class = {
2332 .next = &fair_sched_class,
2333 .enqueue_task = enqueue_task_rt,
2334 .dequeue_task = dequeue_task_rt,
2335 .yield_task = yield_task_rt,
2337 .check_preempt_curr = check_preempt_curr_rt,
2339 .pick_next_task = pick_next_task_rt,
2340 .put_prev_task = put_prev_task_rt,
2342 #ifdef CONFIG_SMP
2343 .select_task_rq = select_task_rq_rt,
2345 .set_cpus_allowed = set_cpus_allowed_common,
2346 .rq_online = rq_online_rt,
2347 .rq_offline = rq_offline_rt,
2348 .task_woken = task_woken_rt,
2349 .switched_from = switched_from_rt,
2350 #endif
2352 .set_curr_task = set_curr_task_rt,
2353 .task_tick = task_tick_rt,
2355 .get_rr_interval = get_rr_interval_rt,
2357 .prio_changed = prio_changed_rt,
2358 .switched_to = switched_to_rt,
2360 .update_curr = update_curr_rt,
2363 #ifdef CONFIG_SCHED_DEBUG
2364 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2366 void print_rt_stats(struct seq_file *m, int cpu)
2368 rt_rq_iter_t iter;
2369 struct rt_rq *rt_rq;
2371 rcu_read_lock();
2372 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2373 print_rt_rq(m, cpu, rt_rq);
2374 rcu_read_unlock();
2376 #endif /* CONFIG_SCHED_DEBUG */