// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include <linux/cpuset.h>

/*
 * Default limits for DL period; on the top end we guard against small util
 * tasks still getting ridiculously long effective runtimes, on the bottom end
 * we guard against timer DoS.
 */
static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_dl_sysctls[] = {
        {
                .procname       = "sched_deadline_period_max_us",
                .data           = &sysctl_sched_dl_period_max,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_douintvec_minmax,
                .extra1         = (void *)&sysctl_sched_dl_period_min,
        },
        {
                .procname       = "sched_deadline_period_min_us",
                .data           = &sysctl_sched_dl_period_min,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_douintvec_minmax,
                .extra2         = (void *)&sysctl_sched_dl_period_max,
        },
};

static int __init sched_dl_sysctl_init(void)
{
        register_sysctl_init("kernel", sched_dl_sysctls);
        return 0;
}
late_initcall(sched_dl_sysctl_init);
#endif
static bool dl_server(struct sched_dl_entity *dl_se)
{
        return dl_se->dl_server;
}

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        BUG_ON(dl_server(dl_se));
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
{
        struct rq *rq = dl_se->rq;

        if (!dl_server(dl_se))
                rq = task_rq(dl_task_of(dl_se));

        return rq;
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        return &rq_of_dl_se(dl_se)->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}
#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
        return dl_se->pi_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
        return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
        return dl_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
        return false;
}
#endif
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");

        if (cpumask_subset(rd->span, cpu_active_mask))
                return cpumask_weight(rd->span);

        cpus = 0;

        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;

        return cpus;
}

static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
{
        unsigned long cap = 0;
        int i;

        for_each_cpu_and(i, mask, cpu_active_mask)
                cap += arch_scale_cpu_capacity(i);

        return cap;
}

/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than the rd's \Sum CPU capacity.
 */
static inline unsigned long dl_bw_capacity(int i)
{
        if (!sched_asym_cpucap_active() &&
            arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
                return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
        } else {
                RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                                 "sched RCU must be held");

                return __dl_bw_capacity(cpu_rq(i)->rd->span);
        }
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
        struct root_domain *rd = cpu_rq(cpu)->rd;

        if (rd->visit_gen == gen)
                return true;

        rd->visit_gen = gen;
        return false;
}
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
        struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
        int i;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        for_each_cpu_and(i, rd->span, cpu_active_mask) {
                struct rq *rq = cpu_rq(i);

                rq->dl.extra_bw += bw;
        }
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
        return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
        return SCHED_CAPACITY_SCALE;
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
        return false;
}

static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
        struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

        dl->extra_bw += bw;
}
#endif
static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
        dl_b->total_bw -= tsk_bw;
        __dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
        dl_b->total_bw += tsk_bw;
        __dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool
__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
{
        return dl_b->bw != -1 &&
               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}
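/*
 * Editor's note -- worked example with illustrative numbers, not from the
 * source: with the default global limit of sched_rt_runtime_us over
 * sched_rt_period_us = 950000/1000000, dl_b->bw encodes 0.95 in BW_SHIFT
 * fixed point. On a root domain of four full-capacity CPUs,
 * cap = 4 << SCHED_CAPACITY_SHIFT, so cap_scale(dl_b->bw, cap) allows
 * 3.8 CPUs worth of deadline bandwidth in total. A new reservation of
 * 30ms every 100ms contributes new_bw = 0.3 and is rejected only if
 * total_bw - old_bw + 0.3 would exceed 3.8.
 */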
static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->running_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->running_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
        if (dl_rq->running_bw > old)
                dl_rq->running_bw = 0;
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->this_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->this_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
        if (dl_rq->this_bw > old)
                dl_rq->this_bw = 0;
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_running_bw(dl_se->dl_bw, dl_rq);
}
static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
{
        if (dl_se->dl_non_contending) {
                sub_running_bw(dl_se, &rq->dl);
                dl_se->dl_non_contending = 0;

                /*
                 * If the timer handler is currently running and the
                 * timer cannot be canceled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
                        if (!dl_server(dl_se))
                                put_task_struct(dl_task_of(dl_se));
                }
        }
        __sub_rq_bw(dl_se->dl_bw, &rq->dl);
        __add_rq_bw(new_bw, &rq->dl);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
        WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);

        if (task_on_rq_queued(p))
                return;

        dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
}

static void __dl_clear_params(struct sched_dl_entity *dl_se);
/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is canceled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |    ACTIVE        |
 *          +------------------>+   contending     |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |    dequeue             |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                |  sub_running_bw |     |
 * +--------+-------+                 |     |
 *          ^                         |     |
 *          |              t < 0-lag  |     |
 *          |                         |     |
 *          |                         V     |
 *          |            +----+------+------+
 *          | sub_running_bw |    ACTIVE    |
 *          +-------------------+           |
 *            inactive timer    | non contending |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
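/*
 * Editor's note -- worked example with illustrative numbers, not from the
 * source: a task with dl_runtime = 10ms and dl_period = 100ms that blocks
 * with 4ms of runtime left and its absolute deadline 50ms away has
 *
 *   0-lag time = deadline - runtime * dl_period / dl_runtime
 *              = deadline - 4ms * 100 / 10 = deadline - 40ms,
 *
 * i.e. 10ms in the future, so the inactive timer is armed for then. Had
 * only 20ms remained to the deadline, the 0-lag time would already have
 * passed and running_bw would be decreased immediately.
 */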
static void task_non_contending(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->inactive_timer;
        struct rq *rq = rq_of_dl_se(dl_se);
        struct dl_rq *dl_rq = &rq->dl;
        s64 zerolag_time;

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing.
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (dl_entity_is_special(dl_se))
                return;

        WARN_ON(dl_se->dl_non_contending);

        zerolag_time = dl_se->deadline -
                 div64_long((dl_se->runtime * dl_se->dl_period),
                        dl_se->dl_runtime);

        /*
         * Using relative times instead of the absolute "0-lag time"
         * allows us to simplify the code.
         */
        zerolag_time -= rq_clock(rq);

        /*
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer.
         */
        if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_server(dl_se)) {
                        sub_running_bw(dl_se, dl_rq);
                } else {
                        struct task_struct *p = dl_task_of(dl_se);

                        if (dl_task(p))
                                sub_running_bw(dl_se, dl_rq);

                        if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
                                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                                if (READ_ONCE(p->__state) == TASK_DEAD)
                                        sub_rq_bw(dl_se, &rq->dl);
                                raw_spin_lock(&dl_b->lock);
                                __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
                                raw_spin_unlock(&dl_b->lock);
                                __dl_clear_params(dl_se);
                        }
                }

                return;
        }

        dl_se->dl_non_contending = 1;
        if (!dl_server(dl_se))
                get_task_struct(dl_task_of(dl_se));

        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing.
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (flags & ENQUEUE_MIGRATED)
                add_rq_bw(dl_se, dl_rq);

        if (dl_se->dl_non_contending) {
                dl_se->dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be canceled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
                        if (!dl_server(dl_se))
                                put_task_struct(dl_task_of(dl_se));
                }
        } else {
                /*
                 * Since "dl_non_contending" is not set, the
                 * task's utilization has already been removed from
                 * active utilization (either when the task blocked,
                 * or when the "inactive timer" fired).
                 * So, add it back.
                 */
                add_running_bw(dl_se, dl_rq);
        }
}

static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
}
static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        dl_b->total_bw = 0;
}
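/*
 * Editor's note -- illustrative arithmetic, not from the source:
 * to_ratio(period, runtime) returns runtime/period in BW_SHIFT fixed
 * point, so with the default sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000 the global limit becomes
 * dl_b->bw = (950000 << BW_SHIFT) / 1000000, i.e. 0.95 in fixed point
 * (~996147 with BW_SHIFT == 20).
 */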
void init_dl_rq(struct dl_rq *dl_rq)
{
        dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif

        dl_rq->running_bw = 0;
        dl_rq->this_bw = 0;
        init_dl_rq_bw_ratio(dl_rq);
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

#define __node_2_pdl(node) \
        rb_entry((node), struct task_struct, pushable_dl_tasks)

static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
{
        return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct rb_node *leftmost;

        WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        leftmost = rb_add_cached(&p->pushable_dl_tasks,
                                 &rq->dl.pushable_dl_tasks_root,
                                 __pushable_less);
        if (leftmost)
                rq->dl.earliest_dl.next = p->dl.deadline;

        if (!rq->dl.overloaded) {
                dl_set_overload(rq);
                rq->dl.overloaded = 1;
        }
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
        struct rb_node *leftmost;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
        if (leftmost)
                dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;

        RB_CLEAR_NODE(&p->pushable_dl_tasks);

        if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
                dl_clear_overload(rq);
                rq->dl.overloaded = 0;
        }
}
static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return rq->online && dl_task(prev);
}

static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_dl_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
        struct rq *later_rq = NULL;
        struct dl_bw *dl_b;

        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
                int cpu;

                /*
                 * If we cannot preempt any rq, fall back to pick any
                 * online CPU:
                 */
                cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Failed to find any suitable CPU.
                         * The task will never come back!
                         */
                        WARN_ON_ONCE(dl_bandwidth_enabled());

                        /*
                         * If admission control is disabled we
                         * try a little harder to let the task
                         * run.
                         */
                        cpu = cpumask_any(cpu_active_mask);
                }
                later_rq = cpu_rq(cpu);
                double_lock_balance(rq, later_rq);
        }

        if (p->dl.dl_non_contending || p->dl.dl_throttled) {
                /*
                 * Inactive timer is armed (or callback is running, but
                 * waiting for us to release rq locks). In any case, when it
                 * will fire (or continue), it will see running_bw of this
                 * task migrated to later_rq (and correctly handle it).
                 */
                sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);

                add_rq_bw(&p->dl, &later_rq->dl);
                add_running_bw(&p->dl, &later_rq->dl);
        } else {
                sub_rq_bw(&p->dl, &rq->dl);
                add_rq_bw(&p->dl, &later_rq->dl);
        }

        /*
         * And we finally need to fix up root_domain(s) bandwidth accounting,
         * since p is still hanging out in the old (now moved to default) root
         * domain.
         */
        dl_b = &rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        dl_b = &later_rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);

        return later_rq;
}
#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}

#endif /* CONFIG_SMP */
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);

static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
                                            struct rq *rq)
{
        /* for non-boosted task, pi_of(dl_se) == dl_se */
        dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
        dl_se->runtime = pi_of(dl_se)->dl_runtime;

        /*
         * If it is a deferred reservation, and the server
         * is not handling a starvation case, defer it.
         */
        if (dl_se->dl_defer && !dl_se->dl_defer_running) {
                dl_se->dl_throttled = 1;
                dl_se->dl_defer_armed = 1;
        }
}
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(is_dl_boosted(dl_se));
        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

        /*
         * We are racing with the deadline timer. So, do nothing because
         * the deadline timer handler will take care of properly recharging
         * the runtime and postponing the deadline.
         */
        if (dl_se->dl_throttled)
                return;

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        replenish_dl_new_period(dl_se, rq);
}
static int start_dl_timer(struct sched_dl_entity *dl_se);
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t);

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         *
         * Or, it could be the case of a deferred reservation that
         * was not able to consume its runtime in background and
         * reached this point with current u > U.
         *
         * In both cases, set a new period.
         */
        if (dl_se->dl_deadline == 0 ||
            (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
                dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
                dl_se->runtime = pi_of(dl_se)->dl_runtime;
        }

        if (dl_se->dl_yielded && dl_se->runtime > 0)
                dl_se->runtime = 0;

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_of(dl_se)->dl_period;
                dl_se->runtime += pi_of(dl_se)->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep the things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                printk_deferred_once("sched: DL replenish lagged too much\n");
                replenish_dl_new_period(dl_se, rq);
        }

        if (dl_se->dl_yielded)
                dl_se->dl_yielded = 0;
        if (dl_se->dl_throttled)
                dl_se->dl_throttled = 0;

        /*
         * If this is the replenishment of a deferred reservation,
         * clear the flag and return.
         */
        if (dl_se->dl_defer_armed) {
                dl_se->dl_defer_armed = 0;
                return;
        }

        /*
         * At this point, if the deferred server is not armed, and the deadline
         * is in the future, if it is not running already, throttle the server
         * and arm the defer timer.
         */
        if (dl_se->dl_defer && !dl_se->dl_defer_running &&
            dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
                if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) {

                        /*
                         * Set dl_se->dl_defer_armed and dl_throttled variables to
                         * inform the start_dl_timer() that this is a deferred
                         * activation.
                         */
                        dl_se->dl_defer_armed = 1;
                        dl_se->dl_throttled = 1;
                        if (!start_dl_timer(dl_se)) {
                                /*
                                 * If for whatever reason (delays), a previous timer was
                                 * queued but not serviced, cancel it and clean the
                                 * deferrable server variables intended for start_dl_timer().
                                 */
                                hrtimer_try_to_cancel(&dl_se->dl_timer);
                                dl_se->dl_defer_armed = 0;
                                dl_se->dl_throttled = 0;
                        }
                }
        }
}
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_of(dl_se)->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}
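/*
 * Editor's note -- worked example with illustrative numbers, not from the
 * source: take a task with dl_runtime = 10ms and dl_deadline = 100ms
 * (bandwidth 0.1) that wakes at time t with 5ms of residual runtime and
 * 20ms left to its old absolute deadline. Cross-multiplying the check:
 *
 *   left  = dl_deadline * runtime       = 100ms * 5ms
 *   right = (deadline - t) * dl_runtime =  20ms * 10ms
 *
 * left > right, i.e. 5ms/20ms = 0.25 > 0.1, so keeping the residual
 * parameters would exceed the reserved bandwidth: the runtime must be
 * refilled and the deadline moved one relative deadline into the future.
 */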
/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * That way, runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
        u64 laxity = dl_se->deadline - rq_clock(rq);

        /*
         * If the task has deadline < period, and the deadline is in the past,
         * it should already be throttled before this check.
         *
         * See update_dl_entity() comments for further details.
         */
        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

        dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
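/*
 * Editor's note -- worked example with illustrative numbers, not from the
 * source: continuing the dl_entity_overflow() example above
 * (dl_runtime = 10ms, dl_deadline = 100ms, so dl_density encodes 0.1 in
 * BW_SHIFT fixed point), a wakeup with 20ms of laxity to the old deadline
 * trims the runtime to
 *
 *	runtime = (0.1 << BW_SHIFT) * 20ms >> BW_SHIFT = 2ms,
 *
 * instead of granting the residual 5ms, which would exceed the density.
 */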
/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
        return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revisited CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
        struct rq *rq = rq_of_dl_se(dl_se);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, rq_clock(rq))) {

                if (unlikely(!dl_is_implicit(dl_se) &&
                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
                             !is_dl_boosted(dl_se))) {
                        update_dl_revised_wakeup(dl_se, rq);
                        return;
                }

                replenish_dl_new_period(dl_se, rq);
        } else if (dl_server(dl_se) && dl_se->dl_defer) {
                /*
                 * The server can still use its previous deadline, so check if
                 * it left the dl_defer_running state.
                 */
                if (!dl_se->dl_defer_running) {
                        dl_se->dl_defer_armed = 1;
                        dl_se->dl_throttled = 1;
                }
        }
}
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        ktime_t now, act;
        s64 delta;

        lockdep_assert_rq_held(rq);

        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         *
         * The deferred reservation will have its timer set to
         * (deadline - runtime). At that point, the CBS rule will decide
         * if the current deadline can be used, or if a replenishment is
         * required to avoid adding too much pressure on the system
         * (current u > U).
         */
        if (dl_se->dl_defer_armed) {
                WARN_ON_ONCE(!dl_se->dl_throttled);
                act = ns_to_ktime(dl_se->deadline - dl_se->runtime);
        } else {
                /* act = deadline - rel-deadline + period */
                act = ns_to_ktime(dl_next_period(dl_se));
        }

        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        /*
         * !enqueued will guarantee another callback; even if one is already in
         * progress. This ensures a balanced {get,put}_task_struct().
         *
         * The race against __run_timer() clearing the enqueued state is
         * harmless because we're holding task_rq()->lock, therefore the timer
         * expiring after we've done the check will wait on its task_rq_lock()
         * and observe our state.
         */
        if (!hrtimer_is_queued(timer)) {
                if (!dl_server(dl_se))
                        get_task_struct(dl_task_of(dl_se));
                hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
        }

        return 1;
}
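/*
 * Editor's note -- illustrative numbers, not from the source: the delta
 * adjustment above converts an instant expressed on the rq clock into the
 * hrtimer's time base. If, say, rq_clock(rq) reads 1000ms while the timer
 * base reads 1003ms (the rq clock can lag while the rq lock is held), a
 * replenishment instant of 1050ms in rq-clock terms must be armed at
 * 1053ms on the timer base, i.e. act += (1003ms - 1000ms).
 */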
static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SMP
        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
        if (has_pushable_dl_tasks(rq)) {
                /*
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock.
                 */
                rq_unpin_lock(rq, rf);
                push_dl_task(rq);
                rq_repin_lock(rq, rf);
        }
#endif
}

/* A defer timer will not be reset if the runtime consumed was < dl_server_min_res. */
static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
{
        struct rq *rq = rq_of_dl_se(dl_se);
        u64 fw;

        scoped_guard (rq_lock, rq) {
                struct rq_flags *rf = &scope.rf;

                if (!dl_se->dl_throttled || !dl_se->dl_runtime)
                        return HRTIMER_NORESTART;

                sched_clock_tick();
                update_rq_clock(rq);

                if (!dl_se->dl_runtime)
                        return HRTIMER_NORESTART;

                if (!dl_se->server_has_tasks(dl_se)) {
                        replenish_dl_entity(dl_se);
                        return HRTIMER_NORESTART;
                }

                if (dl_se->dl_defer_armed) {
                        /*
                         * First check if the server could consume runtime in background.
                         * If so, it is possible to push the defer timer for this amount
                         * of time. The dl_server_min_res serves as a limit to avoid
                         * forwarding the timer for a too small amount of time.
                         */
                        if (dl_time_before(rq_clock(dl_se->rq),
                                           (dl_se->deadline - dl_se->runtime - dl_server_min_res))) {

                                /* reset the defer timer */
                                fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;

                                hrtimer_forward_now(timer, ns_to_ktime(fw));
                                return HRTIMER_RESTART;
                        }

                        dl_se->dl_defer_running = 1;
                }

                enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);

                if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
                        resched_curr(rq);

                __push_dl_task(rq, rf);
        }

        return HRTIMER_NORESTART;
}
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on the fact the task is active,
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p;
        struct rq_flags rf;
        struct rq *rq;

        if (dl_server(dl_se))
                return dl_server_timer(timer, dl_se);

        p = dl_task_of(dl_se);
        rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
         * different than SCHED_DEADLINE (through switched_from_dl()).
         */
        if (!dl_task(p))
                goto unlock;

        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, it's not throttled.
         */
        if (is_dl_boosted(dl_se))
                goto unlock;

        /*
         * Spurious timer due to start_dl_timer() race; or we already received
         * a replenishment from rt_mutex_setprio().
         */
        if (!dl_se->dl_throttled)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);

        /*
         * If the throttle happened during sched-out; like:
         *
         *   schedule()
         *     deactivate_task()
         *       dequeue_task_dl()
         *         update_curr_dl()
         *           start_dl_timer()
         *         __dequeue_task_dl()
         *     prev->on_rq = 0;
         *
         * We can be both throttled and !queued. Replenish the counter
         * but do not enqueue -- wait for our wakeup to do that.
         */
        if (!task_on_rq_queued(p)) {
                replenish_dl_entity(dl_se);
                goto unlock;
        }

#ifdef CONFIG_SMP
        if (unlikely(!rq->online)) {
                /*
                 * If the runqueue is no longer available, migrate the
                 * task elsewhere. This necessarily changes rq.
                 */
                lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
                update_rq_clock(rq);

                /*
                 * Now that the task has been migrated to the new RQ and we
                 * have that locked, proceed as normal and enqueue the task
                 * there.
                 */
        }
#endif

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->donor))
                wakeup_preempt_dl(rq, p, 0);
        else
                resched_curr(rq);

        __push_dl_task(rq, &rf);

unlock:
        task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
         * anything related to that after this.
         */
        put_task_struct(p);

        return HRTIMER_NORESTART;
}
static void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
        struct rq *rq = rq_of_dl_se(dl_se);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
                        return;
                dl_se->dl_throttled = 1;
                if (dl_se->runtime > 0)
                        dl_se->runtime = 0;
        }
}
static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
        return (dl_se->runtime <= 0);
}

/*
 * This function implements the GRUB accounting rule. According to the
 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * "runqueue active utilization", and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value should be
 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
 * not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
        u64 u_act;
        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */

        /*
         * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
         * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
         * can be larger than u_max. So, u_max - u_inact - u_extra would be
         * negative leading to wrong results.
         */
        if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
                u_act = dl_se->dl_bw;
        else
                u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;

        u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
        return (delta * u_act) >> BW_SHIFT;
}
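/*
 * Editor's note -- worked example with illustrative numbers, not from the
 * source: with Umax = 0.95, a runqueue where this_bw = 0.6 and
 * running_bw = 0.4 (so Uinact = 0.2), extra_bw = 0.3, and a task with
 * u = dl_se->dl_bw = 0.4: Uinact + Uextra = 0.5 does not exceed
 * Umax - u = 0.55, so u_act = 0.95 - 0.2 - 0.3 = 0.45. A 1ms slice is
 * then charged as dq = -(0.45 / 0.95) * 1ms ~= -0.47ms, letting the task
 * reclaim bandwidth left unused by other reservations.
 */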
s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
        s64 scaled_delta_exec;

        /*
         * For tasks that participate in GRUB, we implement GRUB-PA: the
         * spare reclaimed bandwidth is used to clock down frequency.
         *
         * For the others, we still need to scale reservation parameters
         * according to current frequency and CPU maximum capacity.
         */
        if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
                scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
        } else {
                int cpu = cpu_of(rq);
                unsigned long scale_freq = arch_scale_freq_capacity(cpu);
                unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

                scaled_delta_exec = cap_scale(delta_exec, scale_freq);
                scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
        }

        return scaled_delta_exec;
}
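/*
 * Editor's note -- worked example with illustrative numbers, not from the
 * source: on a little CPU running at half speed, scale_freq = 512 and
 * scale_cpu = 512 (both out of SCHED_CAPACITY_SCALE == 1024), so a 4ms
 * wall-clock slice is charged as cap_scale(cap_scale(4ms, 512), 512) =
 * 1ms of runtime: the reservation is consumed in "work done" rather than
 * wall-clock terms.
 */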
static inline void
update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
                        int flags);

static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
        s64 scaled_delta_exec;

        if (unlikely(delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
                return;
        }

        if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
                return;

        if (dl_entity_is_special(dl_se))
                return;

        scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);

        dl_se->runtime -= scaled_delta_exec;

        /*
         * The fair server can consume its runtime while throttled (not queued/
         * running as regular CFS).
         *
         * If the server consumes its entire runtime in this state, the server
         * is not required for the current period. Thus, reset the server by
         * starting a new period, pushing the activation.
         */
        if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) {
                /*
                 * If the server was previously activated - the starving
                 * condition took place - at this point it went away because
                 * the fair scheduler was able to get runtime in background.
                 * So return to the initial state.
                 */
                dl_se->dl_defer_running = 0;

                hrtimer_try_to_cancel(&dl_se->dl_timer);

                replenish_dl_new_period(dl_se, dl_se->rq);

                /*
                 * Not being able to start the timer seems problematic. If it could not
                 * be started for whatever reason, we need to "unthrottle" the DL server
                 * and queue right away. Otherwise nothing might queue it. That's similar
                 * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn.
                 */
                WARN_ON_ONCE(!start_dl_timer(dl_se));

                return;
        }

throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;

                /* If requested, inform the user about runtime overruns. */
                if (dl_runtime_exceeded(dl_se) &&
                    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
                        dl_se->dl_overrun = 1;

                dequeue_dl_entity(dl_se, 0);
                if (!dl_server(dl_se)) {
                        update_stats_dequeue_dl(&rq->dl, dl_se, 0);
                        dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
                }

                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
                        if (dl_server(dl_se))
                                enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
                        else
                                enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
                }

                if (!is_leftmost(dl_se, &rq->dl))
                        resched_curr(rq);
        }

        /*
         * The fair server (sole dl_server) does not account for real-time
         * workload because it is running fair work.
         */
        if (dl_se == &rq->fair_server)
                return;

#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * Because -- for now -- we share the rt bandwidth, we need to
         * account our runtime there too, otherwise actual rt tasks
         * would be able to exceed the shared quota.
         *
         * Account to the root rt group for now.
         *
         * The solution we're working towards is having the RT groups scheduled
         * using deadline servers -- however there's a few nasties to figure
         * out before that can happen.
         */
        if (rt_bandwidth_enabled()) {
                struct rt_rq *rt_rq = &rq->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
                 * have our own CBS to keep us inline; only account when RT
                 * bandwidth is relevant.
                 */
                if (sched_rt_bandwidth_account(rt_rq))
                        rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
#endif
}
/*
 * In the non-defer mode, the idle time is not accounted, as the
 * server provides a guarantee.
 *
 * If the dl_server is in defer mode, the idle time is also considered
 * as time available for the fair server, avoiding a penalty for the
 * rt scheduler that did not consume that time.
 */
void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
{
        s64 delta_exec, scaled_delta_exec;

        if (!rq->fair_server.dl_defer)
                return;

        /* no need to discount more */
        if (rq->fair_server.runtime < 0)
                return;

        delta_exec = rq_clock_task(rq) - p->se.exec_start;
        if (delta_exec < 0)
                return;

        scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);

        rq->fair_server.runtime -= scaled_delta_exec;

        if (rq->fair_server.runtime < 0) {
                rq->fair_server.dl_defer_running = 0;
                rq->fair_server.runtime = 0;
        }

        p->se.exec_start = rq_clock_task(rq);
}
void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
{
        /* 0 runtime = fair server disabled */
        if (dl_se->dl_runtime)
                update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
}

void dl_server_start(struct sched_dl_entity *dl_se)
{
        struct rq *rq = dl_se->rq;

        /*
         * XXX: applying the parameters does not work at the init phase for
         * the fair server because things are not yet set. We need to improve
         * this before getting generic.
         */
        if (!dl_server(dl_se)) {
                u64 runtime =  50 * NSEC_PER_MSEC;
                u64 period = 1000 * NSEC_PER_MSEC;

                dl_server_apply_params(dl_se, runtime, period, 1);

                dl_se->dl_server = 1;
                dl_se->dl_defer = 1;
                setup_new_dl_entity(dl_se);
        }

        if (!dl_se->dl_runtime)
                return;

        enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
        if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
                resched_curr(dl_se->rq);
}

void dl_server_stop(struct sched_dl_entity *dl_se)
{
        if (!dl_se->dl_runtime)
                return;

        dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
        hrtimer_try_to_cancel(&dl_se->dl_timer);
        dl_se->dl_defer_armed = 0;
        dl_se->dl_throttled = 0;
}
void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
                    dl_server_has_tasks_f has_tasks,
                    dl_server_pick_f pick_task)
{
        dl_se->rq = rq;
        dl_se->server_has_tasks = has_tasks;
        dl_se->server_pick_task = pick_task;
}

void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
{
        u64 new_bw = dl_se->dl_bw;
        int cpu = cpu_of(rq);
        struct dl_bw *dl_b;

        dl_b = dl_bw_of(cpu_of(rq));
        guard(raw_spinlock)(&dl_b->lock);

        if (!dl_bw_cpus(cpu))
                return;

        __dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
}
int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
{
        u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        u64 new_bw = to_ratio(period, runtime);
        struct rq *rq = dl_se->rq;
        int cpu = cpu_of(rq);
        struct dl_bw *dl_b;
        unsigned long cap;
        int retval = 0;
        int cpus;

        dl_b = dl_bw_of(cpu);
        guard(raw_spinlock)(&dl_b->lock);

        cpus = dl_bw_cpus(cpu);
        cap = dl_bw_capacity(cpu);

        if (__dl_overflow(dl_b, cap, old_bw, new_bw))
                return -EBUSY;

        if (init) {
                __add_rq_bw(new_bw, &rq->dl);
                __dl_add(dl_b, new_bw, cpus);
        } else {
                __dl_sub(dl_b, dl_se->dl_bw, cpus);
                __dl_add(dl_b, new_bw, cpus);

                dl_rq_change_utilization(rq, dl_se, new_bw);
        }

        dl_se->dl_runtime = runtime;
        dl_se->dl_deadline = period;
        dl_se->dl_period = period;

        dl_se->runtime = 0;
        dl_se->deadline = 0;

        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);

        return retval;
}
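/*
 * Editor's note -- illustrative arithmetic, not from the source: with the
 * defaults used in dl_server_start() above (runtime = 50ms, period =
 * 1000ms), the fair server reserves new_bw = to_ratio(1000ms, 50ms), i.e.
 * 0.05 (5% of a CPU) in BW_SHIFT fixed point, which __dl_overflow() checks
 * against the root domain's remaining deadline bandwidth before the
 * parameters are applied.
 */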
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
        struct task_struct *donor = rq->donor;
        struct sched_dl_entity *dl_se = &donor->dl;
        s64 delta_exec;

        if (!dl_task(donor) || !on_dl_rq(dl_se))
                return;

        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
         * in hardirq context, etc.). Deadlines are instead
         * computed using hard walltime. This seems to be the more
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
        delta_exec = update_curr_common(rq);
        update_curr_dl_se(rq, dl_se, delta_exec);
}
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     inactive_timer);
        struct task_struct *p = NULL;
        struct rq_flags rf;
        struct rq *rq;

        if (!dl_server(dl_se)) {
                p = dl_task_of(dl_se);
                rq = task_rq_lock(p, &rf);
        } else {
                rq = dl_se->rq;
                rq_lock(rq, &rf);
        }

        sched_clock_tick();
        update_rq_clock(rq);

        if (dl_server(dl_se))
                goto no_task;

        if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
                        sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
                        sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
                        dl_se->dl_non_contending = 0;
                }

                raw_spin_lock(&dl_b->lock);
                __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                raw_spin_unlock(&dl_b->lock);
                __dl_clear_params(dl_se);

                goto unlock;
        }

no_task:
        if (dl_se->dl_non_contending == 0)
                goto unlock;

        sub_running_bw(dl_se, &rq->dl);
        dl_se->dl_non_contending = 0;
unlock:

        if (!dl_server(dl_se)) {
                task_rq_unlock(rq, p, &rf);
                put_task_struct(p);
        } else {
                rq_unlock(rq, &rf);
        }

        return HRTIMER_NORESTART;
}

static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->inactive_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = inactive_task_timer;
}
#define __node_2_dle(node) \
        rb_entry((node), struct sched_dl_entity, rb_node)

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_rq->earliest_dl.curr == 0 ||
            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
                if (dl_rq->earliest_dl.curr == 0)
                        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
                dl_rq->earliest_dl.curr = deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
        }
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * Since we may have removed our earliest (and/or next earliest)
         * task we must recompute them.
         */
        if (!dl_rq->dl_nr_running) {
                dl_rq->earliest_dl.curr = 0;
                dl_rq->earliest_dl.next = 0;
                cpudl_clear(&rq->rd->cpudl, rq->cpu);
                cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
        } else {
                struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
                struct sched_dl_entity *entry = __node_2_dle(leftmost);

                dl_rq->earliest_dl.curr = entry->deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
        }
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        u64 deadline = dl_se->deadline;

        dl_rq->dl_nr_running++;
        add_nr_running(rq_of_dl_rq(dl_rq), 1);

        inc_dl_deadline(dl_rq, deadline);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        WARN_ON(!dl_rq->dl_nr_running);
        dl_rq->dl_nr_running--;
        sub_nr_running(rq_of_dl_rq(dl_rq), 1);

        dec_dl_deadline(dl_rq, dl_se->deadline);
}

static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
{
        return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
}
static __always_inline struct sched_statistics *
__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
{
        if (!schedstat_enabled())
                return NULL;

        if (dl_server(dl_se))
                return NULL;

        return &dl_task_of(dl_se)->stats;
}

static inline void
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
        struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
        if (stats)
                __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
        struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
        if (stats)
                __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
        struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
        if (stats)
                __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
                        int flags)
{
        if (!schedstat_enabled())
                return;

        if (flags & ENQUEUE_WAKEUP)
                update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
}

static inline void
update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
                        int flags)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (!schedstat_enabled())
                return;

        if ((flags & DEQUEUE_SLEEP)) {
                unsigned int state;

                state = READ_ONCE(p->__state);
                if (state & TASK_INTERRUPTIBLE)
                        __schedstat_set(p->stats.sleep_start,
                                        rq_clock(rq_of_dl_rq(dl_rq)));

                if (state & TASK_UNINTERRUPTIBLE)
                        __schedstat_set(p->stats.block_start,
                                        rq_clock(rq_of_dl_rq(dl_rq)));
        }
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));

	rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);

	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
	WARN_ON_ONCE(on_dl_rq(dl_se));

	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
		dl_check_constrained_dl(dl_se);

	if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

		add_rq_bw(dl_se, dl_rq);
		add_running_bw(dl_se, dl_rq);
	}

	/*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 * However, the active utilization does not depend on the fact
	 * that the task is on the runqueue or not (but depends on the
	 * task's state - in GRUB parlance, "inactive" vs "active contending").
	 * In other words, even if a task is throttled its utilization must
	 * be counted in the active utilization; hence, we need to call
	 * add_running_bw().
	 */
	if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
		if (flags & ENQUEUE_WAKEUP)
			task_contending(dl_se, flags);

		return;
	}

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se);
	} else if ((flags & ENQUEUE_RESTORE) &&
		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
		setup_new_dl_entity(dl_se);
	}

	/*
	 * If the reservation is still throttled, e.g. because it got
	 * replenished but is a deferred task that still has to wait,
	 * don't enqueue.
	 */
	if (dl_se->dl_throttled && start_dl_timer(dl_se))
		return;

	/*
	 * We're about to enqueue, make sure we're not ->dl_throttled!
	 * In case the timer was not started, say because the defer time
	 * has passed, mark as not throttled and mark unarmed.
	 * Also cancel earlier timers, since letting those run is pointless.
	 */
	if (dl_se->dl_throttled) {
		hrtimer_try_to_cancel(&dl_se->dl_timer);
		dl_se->dl_defer_armed = 0;
		dl_se->dl_throttled = 0;
	}

	__enqueue_dl_entity(dl_se);
}
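/*
 * Rough recap of the enqueue decision tree above (a readability aid,
 * not part of the original source):
 *
 *	throttled, not deferred, no REPLENISH	-> leave it to the timer, bail
 *	ENQUEUE_WAKEUP				-> CBS wakeup rule, maybe new deadline
 *	ENQUEUE_REPLENISH			-> refill runtime, push deadline
 *	ENQUEUE_RESTORE, stale deadline		-> start a fresh instance
 *	still throttled, timer armed		-> bail
 *	otherwise				-> clear throttle, insert in rb-tree
 */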
static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
	__dequeue_dl_entity(dl_se);

	if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

		sub_running_bw(dl_se, dl_rq);
		sub_rq_bw(dl_se, dl_rq);
	}

	/*
	 * This check allows us to start the inactive timer (or to immediately
	 * decrease the active utilization, if needed) in two cases:
	 * when the task blocks and when it is terminating
	 * (p->state == TASK_DEAD). We can handle the two cases in the same
	 * way, because from GRUB's point of view the same thing is happening
	 * (the task moves from "active contending" to "active non contending"
	 * or "inactive").
	 */
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	if (is_dl_boosted(&p->dl)) {
		/*
		 * Because of delays in the detection of the overrun of a
		 * thread's runtime, it might be the case that a thread
		 * goes to sleep in a rt mutex with negative runtime. As
		 * a consequence, the thread will be throttled.
		 *
		 * While waiting for the mutex, this thread can also be
		 * boosted via PI, resulting in a thread that is throttled
		 * and boosted at the same time.
		 *
		 * In this case, the boost overrides the throttle.
		 */
		if (p->dl.dl_throttled) {
			/*
			 * The replenish timer needs to be canceled. No
			 * problem if it fires concurrently: boosted threads
			 * are ignored in dl_task_timer().
			 *
			 * If the timer callback was running (hrtimer_try_to_cancel == -1),
			 * it will eventually call put_task_struct().
			 */
			if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
			    !dl_server(&p->dl))
				put_task_struct(p);
			p->dl.dl_throttled = 0;
		}
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task that is going
		 * to be deboosted, but exceeds its runtime while doing so. No point in
		 * replenishing it, as it's going to return to its original
		 * scheduling class after this. If it has been throttled, we need to
		 * clear the flag, otherwise the task may wake up as throttled after
		 * being boosted again with no means to replenish the runtime and clear
		 * the throttle.
		 */
		p->dl.dl_throttled = 0;
		if (!(flags & ENQUEUE_REPLENISH))
			printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
					     task_pid_nr(p));

		return;
	}

	check_schedstat_required();
	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);

	if (p->on_rq == TASK_ON_RQ_MIGRATING)
		flags |= ENQUEUE_MIGRATING;

	enqueue_dl_entity(&p->dl, flags);

	if (dl_server(&p->dl))
		return;

	if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);

	if (p->on_rq == TASK_ON_RQ_MIGRATING)
		flags |= DEQUEUE_MIGRATING;

	dequeue_dl_entity(&p->dl, flags);
	if (!p->dl.dl_throttled && !dl_server(&p->dl))
		dequeue_pushable_dl_task(rq, p);

	return true;
}
/*
 * The yield semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq);
}
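/*
 * Illustrative userspace view (a sketch, not part of this file): a
 * periodic SCHED_DEADLINE task typically ends each job with
 * sched_yield(), which lands here and parks it until the next period:
 *
 *	for (;;) {
 *		do_one_job();		// consumes part of the runtime
 *		sched_yield();		// sleep until the next period
 *	}
 */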
#ifdef CONFIG_SMP

static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
						struct rq *rq)
{
	return (!rq->dl.dl_nr_running ||
		dl_time_before(p->dl.deadline,
			       rq->dl.earliest_dl.curr));
}
static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int flags)
{
	struct task_struct *curr, *donor;
	bool select_rq;
	struct rq *rq;

	if (!(flags & WF_TTWU))
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */
	donor = READ_ONCE(rq->donor);

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	select_rq = unlikely(dl_task(donor)) &&
		    (curr->nr_cpus_allowed < 2 ||
		     !dl_entity_preempt(&p->dl, &donor->dl)) &&
		    p->nr_cpus_allowed > 1;

	/*
	 * Take the capacity of the CPU into account to
	 * ensure it fits the requirement of the task.
	 */
	if (sched_asym_cpucap_active())
		select_rq |= !dl_task_fits_capacity(p, cpu);

	if (select_rq) {
		int target = find_later_rq(p);

		if (target != -1 &&
		    dl_task_is_earliest_deadline(p, cpu_rq(target)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}
static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
	struct rq_flags rf;
	struct rq *rq;

	if (READ_ONCE(p->__state) != TASK_WAKING)
		return;

	rq = task_rq(p);
	/*
	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
	 * rq->lock is not... So, lock it.
	 */
	rq_lock(rq, &rf);
	if (p->dl.dl_non_contending) {
		update_rq_clock(rq);
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(&p->dl, &rq->dl);
	rq_unlock(rq, &rf);
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpudl_find(&rq->rd->cpudl, rq->donor, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL))
		return;

	resched_curr(rq);
}
static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_dl_task(rq);
		rq_repin_lock(rq, rf);
	}

	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
#endif /* CONFIG_SMP */
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
			      int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->donor->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->donor->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
{
	hrtick_start(rq, dl_se->runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
{
}
#endif
static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct dl_rq *dl_rq = &rq->dl;

	p->se.exec_start = rq_clock_task(rq);
	if (on_dl_rq(&p->dl))
		update_stats_wait_end_dl(dl_rq, dl_se);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);

	if (!first)
		return;

	if (rq->donor->sched_class != &dl_sched_class)
		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);

	deadline_queue_push_tasks(rq);

	if (hrtick_enabled_dl(rq))
		start_hrtick_dl(rq, &p->dl);
}
static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
{
	struct rb_node *left = rb_first_cached(&dl_rq->root);

	if (!left)
		return NULL;

	return __node_2_dle(left);
}

/*
 * __pick_task_dl - Helper to pick the next -deadline task to run.
 * @rq: The runqueue to pick the next task from.
 */
static struct task_struct *__pick_task_dl(struct rq *rq)
{
	struct sched_dl_entity *dl_se;
	struct dl_rq *dl_rq = &rq->dl;
	struct task_struct *p;

again:
	if (!sched_dl_runnable(rq))
		return NULL;

	dl_se = pick_next_dl_entity(dl_rq);
	WARN_ON_ONCE(!dl_se);

	if (dl_server(dl_se)) {
		p = dl_se->server_pick_task(dl_se);
		if (!p) {
			dl_se->dl_yielded = 1;
			update_curr_dl_se(rq, dl_se, 0);
			goto again;
		}
		rq->dl_server = dl_se;
	} else {
		p = dl_task_of(dl_se);
	}

	return p;
}

static struct task_struct *pick_task_dl(struct rq *rq)
{
	return __pick_task_dl(rq);
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct dl_rq *dl_rq = &rq->dl;

	if (on_dl_rq(&p->dl))
		update_stats_wait_start_dl(dl_rq, dl_se);

	update_curr_dl(rq);

	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in us
	 * not being the leftmost task anymore. In that case NEED_RESCHED will
	 * be set and schedule() will start a new hrtick for the next task.
	 */
	if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(&p->dl, &rq->dl))
		start_hrtick_dl(rq, &p->dl);
}
static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork; sched_fork() enforces this
	 * (fork() fails unless SCHED_FLAG_RESET_ON_FORK resets the
	 * child's policy), so there is nothing to do here.
	 */
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3
/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct task_struct *p = NULL;
	struct rb_node *next_node;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);

next_node:
	if (next_node) {
		p = __node_2_pdl(next_node);

		if (task_is_pushable(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
		goto next_node;
	}

	return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable CPU.
	 */
	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
		return -1;

	/*
	 * If we are here, some targets have been found, including
	 * the most suitable which is, among the runqueues where the
	 * current tasks have later deadlines than the task's one, the
	 * rq with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last CPU where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_any_and_distribute(later_mask,
							      sched_domain_span(sd));
			/*
			 * Last chance: if a CPU being in both later_mask
			 * and current sd span is valid, that becomes our
			 * choice. Of course, the latest possible CPU is
			 * already under consideration through later_mask.
			 */
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any_distribute(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
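/*
 * Candidate order tried by find_later_rq(), summarized for reference:
 *
 *	1. task_cpu(task), if it is in later_mask	(likely cache-hot)
 *	2. this_cpu, if it shares a WAKE_AFFINE domain	(preempting beats migrating)
 *	3. any later_mask CPU in the current domain span
 *	4. this_cpu, then any later_mask CPU, as last resorts
 */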
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		if (!dl_task_is_earliest_deadline(task, later_rq)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
				     task_on_cpu(rq, task) ||
				     !dl_task(task) ||
				     is_migration_disabled(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (dl_task_is_earliest_deadline(task, later_rq))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));

	WARN_ON_ONCE(rq->cpu != task_cpu(p));
	WARN_ON_ONCE(task_current(rq, p));
	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);

	WARN_ON_ONCE(!task_on_rq_queued(p));
	WARN_ON_ONCE(!dl_task(p));

	return p;
}
/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->donor) &&
	    dl_time_before(next_task->dl.deadline, rq->donor->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	if (is_migration_disabled(next_task))
		return 0;

	if (WARN_ON(next_task == rq->curr))
		return 0;

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other CPU will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	move_queued_task_locked(rq, later_rq, next_task);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}
static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}
static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p, *push_task;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overloaded; this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		push_task = NULL;
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 * - it preempts our current (if there's one),
		 * - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    dl_task_is_earliest_deadline(p, this_rq)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->donor->dl.deadline))
				goto skip;

			if (is_migration_disabled(p)) {
				push_task = get_push_task(src_rq);
			} else {
				move_queued_task_locked(src_rq, this_rq, p);
				dmin = p->dl.deadline;
				resched = true;
			}

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);

		if (push_task) {
			preempt_disable();
			raw_spin_rq_unlock(this_rq);
			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
					    push_task, &src_rq->push_work);
			preempt_enable();
			raw_spin_rq_lock(this_rq);
		}
	}

	if (resched)
		resched_curr(this_rq);
}
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_on_cpu(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->donor) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->donor->dl))) {
		push_dl_tasks(rq);
	}
}
static void set_cpus_allowed_dl(struct task_struct *p,
				struct affinity_context *ctx)
{
	struct root_domain *src_rd;
	struct rq *rq;

	WARN_ON_ONCE(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, ctx);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_clear(&rq->rd->cpudl, rq->cpu);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}
void dl_add_task_root_domain(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	struct dl_bw *dl_b;

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
	if (!dl_task(p)) {
		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
		return;
	}

	rq = __task_rq_lock(p, &rf);

	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);

	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));

	raw_spin_unlock(&dl_b->lock);

	task_rq_unlock(rq, p, &rf);
}

void dl_clear_root_domain(struct root_domain *rd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
	rd->dl_bw.total_bw = 0;
	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
}

#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * task_non_contending() can start the "inactive timer" (if the 0-lag
	 * time is in the future). If the task switches back to dl before
	 * the "inactive timer" fires, it can continue to consume its current
	 * runtime using its current deadline. If it stays outside of
	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
	 * will reset the task parameters.
	 */
	if (task_on_rq_queued(p) && p->dl.dl_runtime)
		task_non_contending(&p->dl);

	/*
	 * In case a task is setscheduled out from SCHED_DEADLINE we need to
	 * keep track of that on its cpuset (for correct bandwidth tracking).
	 */
	dec_dl_tasks_cs(p);

	if (!task_on_rq_queued(p)) {
		/*
		 * Inactive timer is armed. However, p is leaving DEADLINE and
		 * might migrate away from this rq while continuing to run on
		 * some other class. We need to remove its contribution from
		 * this rq running_bw now, or sub_rq_bw (below) will complain.
		 */
		if (p->dl.dl_non_contending)
			sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);
	}

	/*
	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
	 * at the 0-lag time, because the task could have been migrated
	 * in the meanwhile, while running as SCHED_OTHER.
	 */
	if (p->dl.dl_non_contending)
		p->dl.dl_non_contending = 0;

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded CPU, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	deadline_queue_pull_task(rq);
}
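/*
 * For reference, using the expression from task_non_contending(): the
 * "0-lag time" mentioned above is the instant at which the remaining
 * runtime, consumed at the task's reserved rate, would be exactly spent:
 *
 *	t_0lag = deadline - (runtime * dl_period) / dl_runtime
 *
 * e.g. (made-up numbers) runtime = 2ms left, dl_runtime = 10ms and
 * dl_period = 100ms put the 0-lag point 20ms before the deadline.
 */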
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
		put_task_struct(p);

	/*
	 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
	 * track of that on its cpuset (for correct bandwidth tracking).
	 */
	inc_dl_tasks_cs(p);

	/* If p is not queued we will update its parameters at next wakeup. */
	if (!task_on_rq_queued(p)) {
		add_rq_bw(&p->dl, &rq->dl);

		return;
	}

	if (rq->donor != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
			deadline_queue_push_tasks(rq);
#endif
		if (dl_task(rq->donor))
			wakeup_preempt_dl(rq, p, 0);
		else
			resched_curr(rq);
	} else {
		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
	}
}
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (!task_on_rq_queued(p))
		return;

#ifdef CONFIG_SMP
	/*
	 * This might be too much, but unfortunately
	 * we don't have the old deadline value, and
	 * we can't argue if the task is increasing
	 * or lowering its prio, so...
	 */
	if (!rq->dl.overloaded)
		deadline_queue_pull_task(rq);

	if (task_current_donor(rq, p)) {
		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
	} else {
		/*
		 * Current may not be deadline in case p was throttled but we
		 * have just replenished it (e.g. rt_mutex_setprio()).
		 *
		 * Otherwise, if p was given an earlier deadline, reschedule.
		 */
		if (!dl_task(rq->curr) ||
		    dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
			resched_curr(rq);
	}
#else
	/*
	 * We don't know if p has an earlier or later deadline, so let's blindly
	 * set a (maybe not needed) rescheduling point.
	 */
	resched_curr(rq);
#endif
}

#ifdef CONFIG_SCHED_CORE
static int task_is_throttled_dl(struct task_struct *p, int cpu)
{
	return p->dl.dl_throttled;
}
#endif
DEFINE_SCHED_CLASS(dl) = {

	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.wakeup_preempt		= wakeup_preempt_dl,

	.pick_task		= pick_task_dl,
	.put_prev_task		= put_prev_task_dl,
	.set_next_task		= set_next_task_dl,

#ifdef CONFIG_SMP
	.balance		= balance_dl,
	.select_task_rq		= select_task_rq_dl,
	.migrate_task_rq	= migrate_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.task_woken		= task_woken_dl,
	.find_lock_rq		= find_lock_later_rq,
#endif

	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,

	.update_curr		= update_curr_dl,
#ifdef CONFIG_SCHED_CORE
	.task_is_throttled	= task_is_throttled_dl,
#endif
};
/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
static u64 dl_generation;

int sched_dl_global_validate(void)
{
	u64 runtime = global_rt_runtime();
	u64 period = global_rt_period();
	u64 new_bw = to_ratio(period, runtime);
	u64 gen = ++dl_generation;
	struct dl_bw *dl_b;
	int cpu, cpus, ret = 0;
	unsigned long flags;

	/*
	 * Here we want to check the bandwidth not being set to some
	 * value smaller than the currently allocated bandwidth in
	 * any of the root_domains.
	 */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, gen))
			goto next;

		dl_b = dl_bw_of(cpu);
		cpus = dl_bw_cpus(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw * cpus < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

next:
		rcu_read_unlock_sched();

		if (ret)
			break;
	}

	return ret;
}
static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
	if (global_rt_runtime() == RUNTIME_INF) {
		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
		dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
	} else {
		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
		dl_rq->max_bw = dl_rq->extra_bw =
			to_ratio(global_rt_period(), global_rt_runtime());
	}
}
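/*
 * Worked example (default sysctl values, for illustration): with
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000,
 *
 *	max_bw	 = to_ratio(period, runtime) ~ 0.95 << BW_SHIFT
 *	bw_ratio = to_ratio(runtime, period) >> (BW_SHIFT - RATIO_SHIFT)
 *		 ~ (1 / 0.95) << RATIO_SHIFT
 *
 * i.e. GRUB may reclaim at most 95% of a CPU, and bw_ratio is the
 * reciprocal used to scale reclaimed time back up.
 */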
void sched_dl_do_global(void)
{
	u64 new_bw = -1;
	u64 gen = ++dl_generation;
	struct dl_bw *dl_b;
	int cpu;
	unsigned long flags;

	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, gen)) {
			rcu_read_unlock_sched();
			continue;
		}

		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();
		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
	}
}
/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If yes, this function also accordingly updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
int sched_dl_overflow(struct task_struct *p, int policy,
		      const struct sched_attr *attr)
{
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1, cpu = task_cpu(p);
	struct dl_bw *dl_b = dl_bw_of(cpu);
	unsigned long cap;

	if (attr->sched_flags & SCHED_FLAG_SUGOV)
		return 0;

	/* !deadline task may carry old deadline bandwidth */
	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
		return 0;

	/*
	 * Whether a task enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update accordingly the total
	 * allocated bandwidth of the container.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(cpu);
	cap = dl_bw_capacity(cpu);

	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
		if (hrtimer_active(&p->dl.inactive_timer))
			__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		err = 0;
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
		/*
		 * XXX this is slightly incorrect: when the task
		 * utilization decreases, we should delay the total
		 * utilization change until the task's 0-lag point.
		 * But this would require to set the task's "inactive
		 * timer" when the task is not inactive.
		 */
		__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		dl_change_utilization(p, new_bw);
		err = 0;
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		/*
		 * Do not decrease the total deadline utilization here,
		 * switched_from_dl() will take care to do it at the correct
		 * (0-lag) time.
		 */
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}
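/*
 * Worked admission example (made-up numbers): a task with
 * sched_runtime = 10ms and sched_period = 100ms requests
 * new_bw = to_ratio(100ms, 10ms), i.e. 10% of one CPU. On a 4-CPU root
 * domain the request is admitted as long as total_bw + new_bw stays
 * within the capacity-scaled limit checked by __dl_overflow().
 */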
/*
 * This function initializes the sched_dl_entity of a newly becoming
 * SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = attr->sched_runtime;
	dl_se->dl_deadline = attr->sched_deadline;
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
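/*
 * For reference (illustrative numbers): to_ratio(period, runtime) is
 * (runtime << BW_SHIFT) / period, i.e. utilization in fixed point. With
 * BW_SHIFT = 20, runtime = 10ms and period = 100ms:
 *
 *	dl_bw = (10000000 << 20) / 100000000 = 104857 ~ 0.1 * 2^20
 *
 * dl_density is the same ratio computed against the relative deadline
 * instead of the period.
 */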
void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	attr->sched_priority = p->rt_priority;
	attr->sched_runtime = dl_se->dl_runtime;
	attr->sched_deadline = dl_se->dl_deadline;
	attr->sched_period = dl_se->dl_period;
	attr->sched_flags &= ~SCHED_DL_FLAGS;
	attr->sched_flags |= dl_se->flags;
}
/*
 * This function validates the new parameters of a -deadline task.
 * We ask for the deadline not to be zero, and to be greater than or
 * equal to the runtime; the period must either be zero or be greater
 * than or equal to the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
bool __checkparam_dl(const struct sched_attr *attr)
{
	u64 period, max, min;

	/* special dl tasks don't actually use any parameter */
	if (attr->sched_flags & SCHED_FLAG_SUGOV)
		return true;

	/* deadline != 0 */
	if (attr->sched_deadline == 0)
		return false;

	/*
	 * Since we truncate DL_SCALE bits, make sure we're at least
	 * that big.
	 */
	if (attr->sched_runtime < (1ULL << DL_SCALE))
		return false;

	/*
	 * Since we use the MSB for wrap-around and sign issues, make
	 * sure it's not set (mind that period can be equal to zero).
	 */
	if (attr->sched_deadline & (1ULL << 63) ||
	    attr->sched_period & (1ULL << 63))
		return false;

	period = attr->sched_period;
	if (!period)
		period = attr->sched_deadline;

	/* runtime <= deadline <= period (if period != 0) */
	if (period < attr->sched_deadline ||
	    attr->sched_deadline < attr->sched_runtime)
		return false;

	max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
	min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;

	if (period < min || period > max)
		return false;

	return true;
}
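/*
 * Illustrative userspace counterpart (a sketch, not part of the kernel;
 * values chosen to satisfy the checks above):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// >= 1ULL << DL_SCALE
 *		.sched_deadline	= 30 * 1000 * 1000,	// runtime <= deadline
 *		.sched_period	= 100 * 1000 * 1000,	// deadline <= period
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */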
/*
 * This function clears the sched_dl_entity static params.
 */
static void __dl_clear_params(struct sched_dl_entity *dl_se)
{
	dl_se->dl_runtime		= 0;
	dl_se->dl_deadline		= 0;
	dl_se->dl_period		= 0;
	dl_se->flags			= 0;
	dl_se->dl_bw			= 0;
	dl_se->dl_density		= 0;

	dl_se->dl_throttled		= 0;
	dl_se->dl_yielded		= 0;
	dl_se->dl_non_contending	= 0;
	dl_se->dl_overrun		= 0;
	dl_se->dl_server		= 0;

#ifdef CONFIG_RT_MUTEXES
	dl_se->pi_se			= dl_se;
#endif
}

void init_dl_entity(struct sched_dl_entity *dl_se)
{
	RB_CLEAR_NODE(&dl_se->rb_node);
	init_dl_task_timer(dl_se);
	init_dl_inactive_task_timer(dl_se);
	__dl_clear_params(dl_se);
}

bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	if (dl_se->dl_runtime != attr->sched_runtime ||
	    dl_se->dl_deadline != attr->sched_deadline ||
	    dl_se->dl_period != attr->sched_period ||
	    dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
		return true;

	return false;
}
#ifdef CONFIG_SMP
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
				 const struct cpumask *trial)
{
	unsigned long flags, cap;
	struct dl_bw *cur_dl_b;
	int ret = 1;

	rcu_read_lock_sched();
	cur_dl_b = dl_bw_of(cpumask_any(cur));
	cap = __dl_bw_capacity(trial);
	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
	if (__dl_overflow(cur_dl_b, cap, 0, 0))
		ret = 0;
	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}
enum dl_bw_request {
	dl_bw_req_check_overflow = 0,
	dl_bw_req_alloc,
	dl_bw_req_free
};

static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
	unsigned long flags;
	struct dl_bw *dl_b;
	bool overflow = 0;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);

	if (req == dl_bw_req_free) {
		__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
	} else {
		unsigned long cap = dl_bw_capacity(cpu);

		overflow = __dl_overflow(dl_b, cap, 0, dl_bw);

		if (req == dl_bw_req_alloc && !overflow) {
			/*
			 * We reserve space in the destination
			 * root_domain, as we can't fail after this point.
			 * We will free resources in the source root_domain
			 * later on (see set_cpus_allowed_dl()).
			 */
			__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
		}
	}

	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return overflow ? -EBUSY : 0;
}

int dl_bw_check_overflow(int cpu)
{
	return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}

int dl_bw_alloc(int cpu, u64 dl_bw)
{
	return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}

void dl_bw_free(int cpu, u64 dl_bw)
{
	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}
#endif
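/*
 * Usage note, pieced together from the callers referenced above:
 * dl_bw_alloc() charges the destination root_domain up front (see
 * cpuset_can_attach()), the source side is released later in
 * set_cpus_allowed_dl(), and dl_bw_free() is the undo path. This way a
 * cpuset attach fails early with -EBUSY instead of stranding a task.
 */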
#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */