/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
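/*
 * Illustration only (userspace, not part of this file): a task typically
 * enters SCHED_DEADLINE through the sched_setattr() system call, declaring
 * its worst-case runtime, relative deadline and period in nanoseconds,
 * with runtime <= deadline <= period. A minimal sketch:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	(10 ms budget)
 *		.sched_deadline	=  30 * 1000 * 1000,	(30 ms deadline)
 *		.sched_period	= 100 * 1000 * 1000,	(100 ms period)
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 *
 * See Documentation/scheduler/sched-deadline.txt for the full interface.
 */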
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        return &rq->dl;
}
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
        struct sched_dl_entity *dl_se = &p->dl;

        return dl_rq->rb_leftmost == &dl_se->rb_node;
}
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
        raw_spin_lock_init(&dl_b->dl_runtime_lock);
        dl_b->dl_period = period;
        dl_b->dl_runtime = runtime;
}
void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
        dl_b->total_bw = 0;
}
void init_dl_rq(struct dl_rq *dl_rq)
{
        dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->dl_nr_migratory = 0;
        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}
static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}
static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
static void update_dl_migration(struct dl_rq *dl_rq)
{
        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                if (!dl_rq->overloaded) {
                        dl_set_overload(rq_of_dl_rq(dl_rq));
                        dl_rq->overloaded = 1;
                }
        } else if (dl_rq->overloaded) {
                dl_clear_overload(rq_of_dl_rq(dl_rq));
                dl_rq->overloaded = 0;
        }
}
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (tsk_nr_cpus_allowed(p) > 1)
                dl_rq->dl_nr_migratory++;

        update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (tsk_nr_cpus_allowed(p) > 1)
                dl_rq->dl_nr_migratory--;

        update_dl_migration(dl_rq);
}
/*
 * The list of pushable -deadline tasks is not a plist, as in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
        struct rb_node *parent = NULL;
        struct task_struct *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct task_struct,
                                 pushable_dl_tasks);
                if (dl_entity_preempt(&p->dl, &entry->dl))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost) {
                dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
                dl_rq->earliest_dl.next = p->dl.deadline;
        }

        rb_link_node(&p->pushable_dl_tasks, parent, link);
        rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
                struct rb_node *next_node;

                next_node = rb_next(&p->pushable_dl_tasks);
                dl_rq->pushable_dl_tasks_leftmost = next_node;

                if (next_node)
                        dl_rq->earliest_dl.next = rb_entry(next_node,
                                struct task_struct, pushable_dl_tasks)->dl.deadline;
        }

        rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
        RB_CLEAR_NODE(&p->pushable_dl_tasks);
}
static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return dl_task(prev);
}
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_dl_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
        struct rq *later_rq = NULL;
        bool fallback = false;

        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
                int cpu;

                /*
                 * If we cannot preempt any rq, fall back to pick any
                 * online cpu.
                 */
                fallback = true;
                cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Fail to find any suitable cpu.
                         * The task will never come back!
                         */
                        BUG_ON(dl_bandwidth_enabled());

                        /*
                         * If admission control is disabled we
                         * try a little harder to let the task
                         * run.
                         */
                        cpu = cpumask_any(cpu_active_mask);
                }
                later_rq = cpu_rq(cpu);
                double_lock_balance(rq, later_rq);
        }

        /*
         * By now the task is replenished and enqueued; migrate it.
         */
        deactivate_task(rq, p, 0);
        set_task_cpu(p, later_rq->cpu);
        activate_task(later_rq, p, 0);

        if (!fallback)
                resched_curr(later_rq);

        double_unlock_balance(later_rq, rq);

        return later_rq;
}
#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}

static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                  int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The ability to specify such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
                                       struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

        /*
         * We are racing with the deadline timer. So, do nothing because
         * the deadline timer handler will take care of properly recharging
         * the runtime and postponing the deadline.
         */
        if (dl_se->dl_throttled)
                return;

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
        dl_se->runtime = pi_se->dl_runtime;
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity that is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
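/*
 * A worked example (numbers are illustrative only): with dl_runtime = 10ms
 * and dl_period = 100ms, an entity found with runtime = -3ms takes one pass
 * of the replenishment loop below: its deadline moves 100ms further away and
 * its runtime becomes -3ms + 10ms = 7ms. The overrun is thus paid for by
 * postponing the deadline, which keeps the entity within its 10% bandwidth.
 */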
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        BUG_ON(pi_se->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         */
        if (dl_se->dl_deadline == 0) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded && dl_se->runtime > 0)
                dl_se->runtime = 0;

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_se->dl_period;
                dl_se->runtime += pi_se->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                printk_deferred_once("sched: DL replenish lagged too much\n");
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded)
                dl_se->dl_yielded = 0;
        if (dl_se->dl_throttled)
                dl_se->dl_throttled = 0;
}
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
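/*
 * Illustrative numbers only: a task with dl_runtime = 10ms and
 * dl_period = 100ms (bandwidth 0.1) that wakes up with runtime = 5ms left
 * and 20ms to its current deadline has 5/20 = 0.25 > 0.1, so the check
 * below returns true and the caller refills the runtime and pushes the
 * deadline forward; with 80ms left instead, 5/80 = 0.0625 <= 0.1 and the
 * current parameters can be kept.
 */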
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                               struct sched_dl_entity *pi_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_se->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}
/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
                             struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
}
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., whether the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->dl_timer;
        struct rq *rq = task_rq(p);
        ktime_t now, act;
        s64 delta;

        lockdep_assert_held(&rq->lock);

        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
        act = ns_to_ktime(dl_se->deadline);
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        /*
         * !enqueued will guarantee another callback; even if one is already in
         * progress. This ensures a balanced {get,put}_task_struct().
         *
         * The race against __run_timer() clearing the enqueued state is
         * harmless because we're holding task_rq()->lock, therefore the timer
         * expiring after we've done the check will wait on its task_rq_lock()
         * and observe our state.
         */
        if (!hrtimer_is_queued(timer)) {
                get_task_struct(p);
                hrtimer_start(timer, act, HRTIMER_MODE_ABS);
        }

        return 1;
}
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
         * different than SCHED_DEADLINE (through switched_from_dl()).
         */
        if (!dl_task(p)) {
                __dl_clear_params(p);
                goto unlock;
        }

        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path; in that case it's not throttled.
         */
        if (dl_se->dl_boosted)
                goto unlock;

        /*
         * Spurious timer due to start_dl_timer() race; or we already received
         * a replenishment from rt_mutex_setprio().
         */
        if (!dl_se->dl_throttled)
                goto unlock;

        /*
         * If the throttle happened during sched-out, i.e. update_curr_dl()
         * started the timer and __dequeue_task_dl() then removed the task
         * before prev->on_rq was cleared, we can be both throttled and
         * !queued. Replenish the counter but do not enqueue -- wait for our
         * wakeup to do that.
         */
        if (!task_on_rq_queued(p)) {
                replenish_dl_entity(dl_se, dl_se);
                goto unlock;
        }

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
        else
                resched_curr(rq);

#ifdef CONFIG_SMP
        /*
         * Perform balancing operations here; after the replenishments. We
         * cannot drop rq->lock before this, otherwise the assertion in
         * start_dl_timer() about not missing updates is not true.
         *
         * If we find that the rq the task was on is no longer available, we
         * need to select a new rq.
         *
         * XXX figure out if select_task_rq_dl() deals with offline cpus.
         */
        if (unlikely(!rq->online)) {
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(&rq->lock);
        }

        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
        if (has_pushable_dl_tasks(rq)) {
                /*
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock before this.
                 */
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                push_dl_task(rq);
                lockdep_repin_lock(&rq->lock, rf.cookie);
        }
#endif

unlock:
        task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
         * anything related to that after this.
         */
        put_task_struct(p);

        return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
        return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
        u64 delta_exec;

        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;

        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
         * in hardirq context, etc.). Deadlines are instead
         * computed using hard walltime. This seems to be the more
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
                return;
        }

        /* kick cpufreq (see the comment in linux/cpufreq.h). */
        if (cpu_of(rq) == smp_processor_id())
                cpufreq_trigger_update(rq_clock(rq));

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq_clock_task(rq);
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        dl_se->runtime -= delta_exec;

throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;
                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

                if (!is_leftmost(curr, &rq->dl))
                        resched_curr(rq);
        }

        /*
         * Because -- for now -- we share the rt bandwidth, we need to
         * account our runtime there too, otherwise actual rt tasks
         * would be able to exceed the shared quota.
         *
         * Account to the root rt group for now.
         *
         * The solution we're working towards is having the RT groups scheduled
         * using deadline servers -- however there are a few nasties to figure
         * out before that can happen.
         */
        if (rt_bandwidth_enabled()) {
                struct rt_rq *rt_rq = &rq->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
                 * have our own CBS to keep us in line; only account when RT
                 * bandwidth is relevant.
                 */
                if (sched_rt_bandwidth_account(rt_rq))
                        rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
}
#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_rq->earliest_dl.curr == 0 ||
            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
                dl_rq->earliest_dl.curr = deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
        }
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * Since we may have removed our earliest (and/or next earliest)
         * task we must recompute them.
         */
        if (!dl_rq->dl_nr_running) {
                dl_rq->earliest_dl.curr = 0;
                dl_rq->earliest_dl.next = 0;
                cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
        } else {
                struct rb_node *leftmost = dl_rq->rb_leftmost;
                struct sched_dl_entity *entry;

                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
                dl_rq->earliest_dl.curr = entry->deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
        }
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;
        u64 deadline = dl_se->deadline;

        WARN_ON(!dl_prio(prio));
        dl_rq->dl_nr_running++;
        add_nr_running(rq_of_dl_rq(dl_rq), 1);

        inc_dl_deadline(dl_rq, deadline);
        inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;

        WARN_ON(!dl_prio(prio));
        WARN_ON(!dl_rq->dl_nr_running);
        dl_rq->dl_nr_running--;
        sub_nr_running(rq_of_dl_rq(dl_rq), 1);

        dec_dl_deadline(dl_rq, dl_se->deadline);
        dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rb_node **link = &dl_rq->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sched_dl_entity *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_dl_entity, rb_node);
                if (dl_time_before(dl_se->deadline, entry->deadline))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                dl_rq->rb_leftmost = &dl_se->rb_node;

        rb_link_node(&dl_se->rb_node, parent, link);
        rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

        inc_dl_tasks(dl_se, dl_rq);
}
static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        if (RB_EMPTY_NODE(&dl_se->rb_node))
                return;

        if (dl_rq->rb_leftmost == &dl_se->rb_node) {
                struct rb_node *next_node;

                next_node = rb_next(&dl_se->rb_node);
                dl_rq->rb_leftmost = next_node;
        }

        rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
        RB_CLEAR_NODE(&dl_se->rb_node);

        dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
                  struct sched_dl_entity *pi_se, int flags)
{
        BUG_ON(on_dl_rq(dl_se));

        /*
         * If this is a wakeup or a new instance, the scheduling
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
        if (flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
        else if (flags & ENQUEUE_REPLENISH)
                replenish_dl_entity(dl_se, pi_se);

        __enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        __dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        struct task_struct *pi_task = rt_mutex_get_top_task(p);
        struct sched_dl_entity *pi_se = &p->dl;

        /*
         * Use the scheduling parameters of the top pi-waiter
         * task if we have one and its (absolute) deadline is
         * smaller than our one... OTW we keep our runtime and
         * deadline.
         */
        if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
                pi_se = &pi_task->dl;
        } else if (!dl_prio(p->normal_prio)) {
                /*
                 * Special case in which we have a !SCHED_DEADLINE task
                 * that is going to be deboosted, but exceeds its
                 * runtime while doing so. No point in replenishing
                 * it, as it's going to return back to its original
                 * scheduling class after this.
                 */
                BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
                return;
        }

        /*
         * If p is throttled, we do nothing. In fact, if it exhausted
         * its budget it needs a replenishment and, since it now is on
         * its rq, the bandwidth timer callback (which clearly has not
         * run yet) will take care of this.
         */
        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
                return;

        enqueue_dl_entity(&p->dl, pi_se, flags);

        if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
                enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        dequeue_dl_entity(&p->dl);
        dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        update_curr_dl(rq);
        __dequeue_task_dl(rq, p, flags);
}
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
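/*
 * Usage note (a summary, not new behaviour): a -deadline task that finishes
 * its job early can call sched_yield() to give up what is left of its
 * runtime; it is then throttled until the bandwidth timer fires at its
 * current deadline and hands it a fresh runtime and deadline (see the
 * dl_yielded handling in update_curr_dl() and replenish_dl_entity()).
 */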
static void yield_task_dl(struct rq *rq)
{
        /*
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
         * new scheduling parameters (thanks to dl_yielded=1).
         */
        rq->curr->dl.dl_yielded = 1;

        update_rq_clock(rq);
        update_curr_dl(rq);
        /*
         * Tell update_rq_clock() that we've just updated,
         * so we don't do microscopic update in schedule()
         * and double the fastpath cost.
         */
        rq_clock_skip_update(rq, true);
}
#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;

        if (sd_flag != SD_BALANCE_WAKE)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = READ_ONCE(rq->curr); /* unlocked access */

        /*
         * If we are dealing with a -deadline task, we must
         * decide where to wake it up.
         * If it has a later deadline and the current task
         * on this rq can't move (provided the waking task
         * can!) we prefer to send it somewhere else. On the
         * other hand, if it has a shorter deadline, we
         * try to make it stay here, it might be important.
         */
        if (unlikely(dl_task(curr)) &&
            (tsk_nr_cpus_allowed(curr) < 2 ||
             !dl_entity_preempt(&p->dl, &curr->dl)) &&
            (tsk_nr_cpus_allowed(p) > 1)) {
                int target = find_later_rq(p);

                if (target != -1 &&
                    (dl_time_before(p->dl.deadline,
                                    cpu_rq(target)->dl.earliest_dl.curr) ||
                     (cpu_rq(target)->dl.dl_nr_running == 0)))
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
        /*
         * Current can't be migrated, useless to reschedule,
         * let's hope p can move out.
         */
        if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
            cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
                return;

        /*
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
        if (tsk_nr_cpus_allowed(p) != 1 &&
            cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
                return;

        resched_curr(rq);
}

#endif /* CONFIG_SMP */
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                  int flags)
{
        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
                resched_curr(rq);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * In the unlikely case current and p have the same deadline
         * let us try to decide what's the best thing to do...
         */
        if ((p->dl.deadline == rq->curr->dl.deadline) &&
            !test_tsk_need_resched(rq->curr))
                check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
        hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
                                                   struct dl_rq *dl_rq)
{
        struct rb_node *left = dl_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_dl_entity, rb_node);
}
struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
        struct sched_dl_entity *dl_se;
        struct task_struct *p;
        struct dl_rq *dl_rq;

        dl_rq = &rq->dl;

        if (need_pull_dl_task(rq, prev)) {
                /*
                 * This is OK, because current is on_cpu, which avoids it being
                 * picked for load-balance and preemption/IRQs are still
                 * disabled avoiding further scheduler activity on it and we're
                 * being very careful to re-start the picking loop.
                 */
                lockdep_unpin_lock(&rq->lock, cookie);
                pull_dl_task(rq);
                lockdep_repin_lock(&rq->lock, cookie);
                /*
                 * pull_dl_task() can drop (and re-acquire) rq->lock; this
                 * means a stop task can slip in, in which case we need to
                 * re-start task selection.
                 */
                if (rq->stop && task_on_rq_queued(rq->stop))
                        return RETRY_TASK;
        }

        /*
         * When prev is DL, we may throttle it in put_prev_task().
         * So, we update time before we check for dl_nr_running.
         */
        if (prev->sched_class == &dl_sched_class)
                update_curr_dl(rq);

        if (unlikely(!dl_rq->dl_nr_running))
                return NULL;

        put_prev_task(rq, prev);

        dl_se = pick_next_dl_entity(rq, dl_rq);
        BUG_ON(!dl_se);

        p = dl_task_of(dl_se);
        p->se.exec_start = rq_clock_task(rq);

        /* Running task will never be pushed. */
        dequeue_pushable_dl_task(rq, p);

        if (hrtick_enabled(rq))
                start_hrtick_dl(rq, p);

        queue_push_tasks(rq);

        return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
        update_curr_dl(rq);

        if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
                enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_dl(rq);

        /*
         * Even when we have runtime, update_curr_dl() might have resulted in us
         * not being the leftmost task anymore. In that case NEED_RESCHED will
         * be set and schedule() will start a new hrtick for the next task.
         */
        if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
            is_leftmost(p, &rq->dl))
                start_hrtick_dl(rq, p);
}
static void task_fork_dl(struct task_struct *p)
{
        /*
         * SCHED_DEADLINE tasks cannot fork and this is achieved through
         * sched_fork().
         */
}

static void task_dead_dl(struct task_struct *p)
{
        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

        /*
         * Since we are TASK_DEAD we won't slip out of the domain!
         */
        raw_spin_lock_irq(&dl_b->lock);
        /* XXX we should retain the bw until 0-lag */
        dl_b->total_bw -= p->dl.dl_bw;
        raw_spin_unlock_irq(&dl_b->lock);
}

static void set_curr_task_dl(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq_clock_task(rq);

        /* You can't push away the running task */
        dequeue_pushable_dl_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                return 1;
        return 0;
}
/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
        struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
        struct task_struct *p = NULL;

        if (!has_pushable_dl_tasks(rq))
                return NULL;

        while (next_node) {
                p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

                if (pick_dl_task(rq, p, cpu))
                        return p;

                next_node = rb_next(next_node);
        }

        return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
        int this_cpu = smp_processor_id();
        int best_cpu, cpu = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!later_mask))
                return -1;

        if (tsk_nr_cpus_allowed(task) == 1)
                return -1;

        /*
         * We have to consider system topology and task affinity
         * first, then we can look for a suitable cpu.
         */
        best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
                              task, later_mask);
        if (best_cpu == -1)
                return -1;

        /*
         * If we are here, some target has been found,
         * the most suitable of which is cached in best_cpu.
         * This is, among the runqueues where the current tasks
         * have later deadlines than the task's one, the rq
         * with the latest possible one.
         *
         * Now we check how well this matches with task's
         * affinity and system topology.
         *
         * The last cpu where the task ran is our first
         * guess, since it is most likely cache-hot there.
         */
        if (cpumask_test_cpu(cpu, later_mask))
                return cpu;
        /*
         * Check if this_cpu is to be skipped (i.e., it is
         * not in the mask) or not.
         */
        if (!cpumask_test_cpu(this_cpu, later_mask))
                this_cpu = -1;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {

                        /*
                         * If possible, preempting this_cpu is
                         * cheaper than migrating.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        /*
                         * Last chance: if best_cpu is valid and is
                         * in the mask, that becomes our choice.
                         */
                        if (best_cpu < nr_cpu_ids &&
                            cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * At this point, all our guesses failed, we just return
         * 'something', and let the caller sort the things out.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any(later_mask);
        if (cpu < nr_cpu_ids)
                return cpu;

        return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *later_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < DL_MAX_TRIES; tries++) {
                cpu = find_later_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                later_rq = cpu_rq(cpu);

                if (later_rq->dl.dl_nr_running &&
                    !dl_time_before(task->dl.deadline,
                                    later_rq->dl.earliest_dl.curr)) {
                        /*
                         * Target rq has tasks of equal or earlier deadline,
                         * retrying does not release any lock and is unlikely
                         * to yield a different result.
                         */
                        later_rq = NULL;
                        break;
                }

                /* Retry if something changed. */
                if (double_lock_balance(rq, later_rq)) {
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(later_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
                                     !dl_task(task) ||
                                     !task_on_rq_queued(task))) {
                                double_unlock_balance(rq, later_rq);
                                later_rq = NULL;
                                break;
                        }
                }

                /*
                 * If the rq we found has no -deadline task, or
                 * its earliest one has a later deadline than our
                 * task, the rq is a good one.
                 */
                if (!later_rq->dl.dl_nr_running ||
                    dl_time_before(task->dl.deadline,
                                   later_rq->dl.earliest_dl.curr))
                        break;

                /* Otherwise we try again. */
                double_unlock_balance(rq, later_rq);
                later_rq = NULL;
        }

        return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_dl_tasks(rq))
                return NULL;

        p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
                     struct task_struct, pushable_dl_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
        BUG_ON(tsk_nr_cpus_allowed(p) <= 1);

        BUG_ON(!task_on_rq_queued(p));
        BUG_ON(!dl_task(p));

        return p;
}
/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *later_rq;
        int ret = 0;

        if (!rq->dl.overloaded)
                return 0;

        next_task = pick_next_pushable_dl_task(rq);
        if (!next_task)
                return 0;

retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * If next_task preempts rq->curr, and rq->curr
         * can move away, it makes sense to just reschedule
         * without going further in pushing next_task.
         */
        if (dl_task(rq->curr) &&
            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
            tsk_nr_cpus_allowed(rq->curr) > 1) {
                resched_curr(rq);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* Will lock the rq it'll find */
        later_rq = find_lock_later_rq(next_task, rq);
        if (!later_rq) {
                struct task_struct *task;

                /*
                 * We must check all this again, since
                 * find_lock_later_rq releases rq->lock and it is
                 * then possible that next_task has migrated.
                 */
                task = pick_next_pushable_dl_task(rq);
                if (task_cpu(next_task) == rq->cpu && task == next_task) {
                        /*
                         * The task is still there. We don't try
                         * again, some other cpu will pull it when ready.
                         */
                        goto out;
                }

                if (!task)
                        /* No more tasks */
                        goto out;

                put_task_struct(next_task);
                next_task = task;
                goto retry;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, later_rq->cpu);
        activate_task(later_rq, next_task, 0);
        ret = 1;

        resched_curr(later_rq);

        double_unlock_balance(rq, later_rq);

out:
        put_task_struct(next_task);

        return ret;
}
static void push_dl_tasks(struct rq *rq)
{
        /* push_dl_task() will return true if it moved a -deadline task */
        while (push_dl_task(rq))
                ;
}
static void pull_dl_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, cpu;
        struct task_struct *p;
        bool resched = false;
        struct rq *src_rq;
        u64 dmin = LONG_MAX;

        if (likely(!dl_overloaded(this_rq)))
                return;

        /*
         * Match the barrier from dl_set_overload(); this guarantees that if we
         * see overloaded we must also see the dlo_mask bit.
         */
        smp_rmb();

        for_each_cpu(cpu, this_rq->rd->dlo_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);

                /*
                 * It looks racy, and it is! However, as in sched_rt.c,
                 * we are fine with this.
                 */
                if (this_rq->dl.dl_nr_running &&
                    dl_time_before(this_rq->dl.earliest_dl.curr,
                                   src_rq->dl.earliest_dl.next))
                        continue;

                /* Might drop this_rq->lock */
                double_lock_balance(this_rq, src_rq);

                /*
                 * If there are no more pullable tasks on the
                 * rq, we're done with it.
                 */
                if (src_rq->dl.dl_nr_running <= 1)
                        goto skip;

                p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

                /*
                 * We found a task to be pulled if:
                 *  - it preempts our current (if there's one),
                 *  - it will preempt the last one we pulled (if any).
                 */
                if (p && dl_time_before(p->dl.deadline, dmin) &&
                    (!this_rq->dl.dl_nr_running ||
                     dl_time_before(p->dl.deadline,
                                    this_rq->dl.earliest_dl.curr))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!task_on_rq_queued(p));

                        /*
                         * Then we pull iff p has actually an earlier
                         * deadline than the current task of its runqueue.
                         */
                        if (dl_time_before(p->dl.deadline,
                                           src_rq->curr->dl.deadline))
                                goto skip;

                        resched = true;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        dmin = p->dl.deadline;

                        /* Is there any other task even earlier? */
                }
skip:
                double_unlock_balance(this_rq, src_rq);
        }

        if (resched)
                resched_curr(this_rq);
}
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            tsk_nr_cpus_allowed(p) > 1 &&
            dl_task(rq->curr) &&
            (tsk_nr_cpus_allowed(rq->curr) < 2 ||
             !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
                push_dl_tasks(rq);
        }
}
static void set_cpus_allowed_dl(struct task_struct *p,
                                const struct cpumask *new_mask)
{
        struct root_domain *src_rd;
        struct rq *rq;

        BUG_ON(!dl_task(p));

        rq = task_rq(p);
        src_rd = rq->rd;
        /*
         * Migrating a SCHED_DEADLINE task between exclusive
         * cpusets (different root_domains) entails a bandwidth
         * update. We already made space for us in the destination
         * domain (see cpuset_can_attach()).
         */
        if (!cpumask_intersects(src_rd->span, new_mask)) {
                struct dl_bw *src_dl_b;

                src_dl_b = dl_bw_of(cpu_of(rq));
                /*
                 * We now free resources of the root_domain we are migrating
                 * off. In the worst case, sched_setattr() may temporarily fail
                 * until we complete the update.
                 */
                raw_spin_lock(&src_dl_b->lock);
                __dl_clear(src_dl_b, p->dl.dl_bw);
                raw_spin_unlock(&src_dl_b->lock);
        }

        set_cpus_allowed_common(p, new_mask);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
        if (rq->dl.overloaded)
                dl_set_overload(rq);

        cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
        if (rq->dl.dl_nr_running > 0)
                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
        if (rq->dl.overloaded)
                dl_clear_overload(rq);

        cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
        cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
void __init init_sched_dl_class(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
                                        GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
        /*
         * Start the deadline timer; if we switch back to dl before this we'll
         * continue consuming our current CBS slice. If we stay outside of
         * SCHED_DEADLINE until the deadline passes, the timer will reset the
         * task.
         */
        if (!start_dl_timer(p))
                __dl_clear_params(p);

        /*
         * Since this might be the only -deadline task on the rq,
         * this is the right place to try to pull some other one
         * from an overloaded cpu, if any.
         */
        if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
                return;

        queue_pull_task(rq);
}
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
        if (dl_time_before(p->dl.deadline, rq_clock(rq)))
                setup_new_dl_entity(&p->dl, &p->dl);

        if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
                if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
                        queue_push_tasks(rq);
#else
                if (dl_task(rq->curr))
                        check_preempt_curr_dl(rq, p, 0);
                else
                        resched_curr(rq);
#endif /* CONFIG_SMP */
        }
}
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                            int oldprio)
{
        if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
                /*
                 * This might be too much, but unfortunately
                 * we don't have the old deadline value, and
                 * we can't argue if the task is increasing
                 * or lowering its prio, so...
                 */
                if (!rq->dl.overloaded)
                        queue_pull_task(rq);

                /*
                 * If we now have an earlier deadline task than p,
                 * then reschedule, provided p is still on this
                 * runqueue.
                 */
                if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
                        resched_curr(rq);
#else
                /*
                 * Again, we don't know if p has an earlier
                 * or later deadline, so let's blindly set a
                 * (maybe not needed) rescheduling point.
                 */
                resched_curr(rq);
#endif /* CONFIG_SMP */
        }
}
const struct sched_class dl_sched_class = {
        .next                   = &rt_sched_class,
        .enqueue_task           = enqueue_task_dl,
        .dequeue_task           = dequeue_task_dl,
        .yield_task             = yield_task_dl,

        .check_preempt_curr     = check_preempt_curr_dl,

        .pick_next_task         = pick_next_task_dl,
        .put_prev_task          = put_prev_task_dl,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_dl,
        .set_cpus_allowed       = set_cpus_allowed_dl,
        .rq_online              = rq_online_dl,
        .rq_offline             = rq_offline_dl,
        .task_woken             = task_woken_dl,
#endif

        .set_curr_task          = set_curr_task_dl,
        .task_tick              = task_tick_dl,
        .task_fork              = task_fork_dl,
        .task_dead              = task_dead_dl,

        .prio_changed           = prio_changed_dl,
        .switched_from          = switched_from_dl,
        .switched_to            = switched_to_dl,

        .update_curr            = update_curr_dl,
};
1810 extern void print_dl_rq(struct seq_file
*m
, int cpu
, struct dl_rq
*dl_rq
);
1812 void print_dl_stats(struct seq_file
*m
, int cpu
)
1814 print_dl_rq(m
, cpu
, &cpu_rq(cpu
)->dl
);
1816 #endif /* CONFIG_SCHED_DEBUG */