// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Core kernel CPU scheduler code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>
#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "autogroup.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;
#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (p->dl_server)
		return -1; /* deadline */

	if (rt_or_dl_prio(p->prio))
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

#ifdef CONFIG_SCHED_CLASS_EXT
	if (p->sched_class == &ext_sched_class)
		return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
#endif

	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{
	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pa > -pb)
		return false;

	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
		const struct sched_dl_entity *a_dl, *b_dl;

		a_dl = &a->dl;
		/*
		 * Since,'a' and 'b' can be CFS tasks served by DL server,
		 * __task_prio() can return -1 (for DL) even for those. In that
		 * case, get to the dl_server's DL entity.
		 */
		if (a->dl_server)
			a_dl = a->dl_server;

		b_dl = &b->dl;
		if (b->dl_server)
			b_dl = b->dl_server;

		return !dl_time_before(a_dl->deadline, b_dl->deadline);
	}

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

#ifdef CONFIG_SCHED_CLASS_EXT
	if (pa == MAX_RT_PRIO + MAX_NICE + 1)	/* ext */
		return scx_prio_less(a, b, in_fi);
#endif

	return false;
}
static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}
#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}
void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}
void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}
static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}
static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}
/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}
/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;
static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}
static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}
static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}
void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */
/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 *   Additionally it is possible to be ->on_rq but still be considered not
 *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
 *   but will be dequeued as soon as they get picked again. See the
 *   task_is_runnable() helper.
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}
bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}IRQ region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}IRQ
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
	delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}
void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->donor->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}
#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */
static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */
/*
 * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})
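
/*
 * For illustration (a sketch, not code from this file): fetch_or() atomically
 * ORs @mask into *@ptr and evaluates to the value *before* the OR, e.g.
 *
 *	unsigned long old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *	if (!(old & _TIF_POLLING_NRFLAG))
 *		;	// target wasn't polling, an IPI may be required
 */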

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

	return true;
}

#else
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	set_ti_thread_flag(ti, tif);
	return true;
}

static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}
/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}
void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
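
/*
 * For illustration, the usual wake_q caller pattern (a sketch, not code from
 * this file): collect wakeups while holding a lock, issue them after dropping
 * it so the woken tasks never contend on that lock:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&lock);
 *	wake_q_add(&wake_q, task);	// takes a task reference
 *	spin_unlock(&lock);
 *	wake_up_q(&wake_q);		// wakes and drops the references
 */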

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
static void __resched_curr(struct rq *rq, int tif)
{
	struct task_struct *curr = rq->curr;
	struct thread_info *cti = task_thread_info(curr);
	int cpu;

	lockdep_assert_rq_held(rq);

	/*
	 * Always immediately preempt the idle task; no point in delaying doing
	 * actual work.
	 */
	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
		tif = TIF_NEED_RESCHED;

	if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_ti_thread_flag(cti, tif);
		if (tif == TIF_NEED_RESCHED)
			set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(cti, tif)) {
		if (tif == TIF_NEED_RESCHED)
			smp_send_reschedule(cpu);
	} else {
		trace_sched_wake_idle_without_ipi(cpu);
	}
}

void resched_curr(struct rq *rq)
{
	__resched_curr(rq, TIF_NEED_RESCHED);
}
#ifdef CONFIG_PREEMPT_DYNAMIC
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
static __always_inline bool dynamic_preempt_lazy(void)
{
	return static_branch_unlikely(&sk_dynamic_preempt_lazy);
}
#else
static __always_inline bool dynamic_preempt_lazy(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}
#endif

static __always_inline int get_lazy_tif_bit(void)
{
	if (dynamic_preempt_lazy())
		return TIF_NEED_RESCHED_LAZY;

	return TIF_NEED_RESCHED;
}

void resched_curr_lazy(struct rq *rq)
{
	__resched_curr(rq, get_lazy_tif_bit());
}
void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);

	return default_cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
	 * part of the idle loop. This forces an exit from the idle loop
	 * and a round trip to schedule(). Now this could be optimized
	 * because a simple new idle loop iteration is enough to
	 * re-evaluate the next tick. Provided some re-ordering of tick
	 * nohz functions that would need to follow TIF_NR_POLLING
	 * clearing:
	 *
	 * - On most architectures, a simple fetch_or on ti::flags with a
	 *   "0" value would be enough to know if an IPI needs to be sent.
	 *
	 * - x86 needs to perform a last need_resched() check between
	 *   monitor and mwait which doesn't take timers into account.
	 *   There a dedicated TIF_TIMER flag would be required to
	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
	 *   before mwait().
	 *
	 * However, remote timer enqueue is not such a frequent event
	 * and testing of the above solutions didn't appear to report
	 * significant benefits.
	 */
	if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}
static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}
static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there are more than one RR tasks, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks
	 * left. For CFS, if there's more than one we need the tick for
	 * involuntary preemption. For SCX, ask.
	 */
	if (scx_enabled() && !scx_can_stop_tick(rq))
		return false;

	if (rq->cfs.nr_running > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the cpu now we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (__need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight lw;

	if (task_has_idle_policy(p)) {
		lw.weight = scale_load(WEIGHT_IDLEPRIO);
		lw.inv_weight = WMULT_IDLEPRIO;
	} else {
		lw.weight = scale_load(sched_prio_to_weight[prio]);
		lw.inv_weight = sched_prio_to_wmult[prio];
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight.
	 */
	if (update_load && p->sched_class->reweight_task)
		p->sched_class->reweight_task(task_rq(p), p, &lw);
	else
		p->se.load = lw;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static __maybe_unused DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks that their uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have an actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
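
/*
 * For illustration: the fast-path pattern this key guards (used verbatim in
 * uclamp_rq_inc()/uclamp_rq_dec() further below) is simply
 *
 *	if (!static_branch_unlikely(&sched_uclamp_used))
 *		return;
 *
 * so until userspace first touches a uclamp knob, enqueue/dequeue pay only
 * for a single patched-out branch.
 */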

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}
static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}
static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * top most bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}
static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}
static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	guard(task_rq_lock)(p);
	__uclamp_update_util_min_rt_default(p);
}
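
/*
 * Illustrative note (a sketch, not code from this file): the scope-based
 * guard(task_rq_lock)(p) above is roughly equivalent to
 *
 *	struct rq_flags rf;
 *	struct rq *rq = task_rq_lock(p, &rf);
 *	...
 *	task_rq_unlock(rq, p, &rf);
 *
 * with the unlock issued automatically when the enclosing scope is left.
 */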

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}
/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}
unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}
/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows to track the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
		uclamp_rq_set(rq, clamp_id, uc_se->value);
}
/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = uclamp_rq_get(rq, clamp_id);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fix up the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		uclamp_rq_set(rq, clamp_id, bkt_clamp);
	}
}
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	if (p->se.sched_delayed)
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	if (p->se.sched_delayed)
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}
static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}
static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}
#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	guard(rcu)();
	cpu_util_update_eff(&root_task_group.css);
}
#else
static void uclamp_update_root_tg(void) { }
#endif

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * value.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	guard(rcu)();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
}

static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	guard(mutex)(&uclamp_mutex);

	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		return 0;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */
	return 0;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
	return result;
}
#endif /* CONFIG_SYSCTL */

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* !CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

bool sched_task_on_rq(struct task_struct *p)
{
	return task_on_rq_queued(p);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip = 0;
	unsigned int state;

	if (!p || p == current)
		return 0;

	/* Only get wchan if task is blocked and we can keep it that way. */
	raw_spin_lock_irq(&p->pi_lock);
	state = READ_ONCE(p->__state);
	smp_rmb(); /* see try_to_wake_up() */
	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
		ip = __get_wchan(p);
	raw_spin_unlock_irq(&p->pi_lock);

	return ip;
}

void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	p->sched_class->enqueue_task(rq, p, flags);
	/*
	 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
	 * ->sched_delayed.
	 */
	uclamp_rq_inc(rq, p);

	psi_enqueue(p, flags);

	if (!(flags & ENQUEUE_RESTORE))
		sched_info_enqueue(rq, p);

	if (sched_core_enabled(rq))
		sched_core_enqueue(rq, p);
}
/*
 * Must only return false when DEQUEUE_SLEEP.
 */
inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (sched_core_enabled(rq))
		sched_core_dequeue(rq, p, flags);

	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeue(rq, p);

	psi_dequeue(p, flags);

	/*
	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
	 * and mark the task ->sched_delayed.
	 */
	uclamp_rq_dec(rq, p);
	return p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_on_rq_migrating(p))
		flags |= ENQUEUE_MIGRATED;
	if (flags & ENQUEUE_MIGRATED)
		sched_mm_cid_migrate_to(rq, p);

	enqueue_task(rq, p, flags);

	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	SCHED_WARN_ON(flags & DEQUEUE_SLEEP);

	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
	ASSERT_EXCLUSIVE_WRITER(p->on_rq);

	/*
	 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
	 * dequeue_task() and cleared *after* enqueue_task().
	 */

	dequeue_task(rq, p, flags);
}

static void block_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
		__block_task(rq, p);
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * ->switching_to() is called with the pi_lock and rq_lock held and must not
 * mess with locking.
 */
void check_class_changing(struct rq *rq, struct task_struct *p,
			  const struct sched_class *prev_class)
{
	if (prev_class != p->sched_class && p->sched_class->switching_to)
		p->sched_class->switching_to(rq, p);
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
void check_class_changed(struct rq *rq, struct task_struct *p,
			 const struct sched_class *prev_class,
			 int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *donor = rq->donor;

	if (p->sched_class == donor->sched_class)
		donor->sched_class->wakeup_preempt(rq, p, flags);
	else if (sched_class_above(p->sched_class, donor->sched_class))
		resched_curr(rq);

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

static __always_inline
int __task_state_match(struct task_struct *p, unsigned int state)
{
	if (READ_ONCE(p->__state) & state)
		return 1;

	if (READ_ONCE(p->saved_state) & state)
		return -1;

	return 0;
}

static __always_inline
int task_state_match(struct task_struct *p, unsigned int state)
{
	/*
	 * Serialize against current_save_and_set_rtlock_wait_state(),
	 * current_restore_rtlock_saved_state(), and __refrigerator().
	 */
	guard(raw_spinlock_irq)(&p->pi_lock);
	return __task_state_match(p, state);
}
/**
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * Wait for the thread to block in any of the states set in @match_state.
 * If it changes, i.e. @p might have woken up, then return zero.  When we
 * succeed in waiting for @p to be off its CPU, we return a positive number
 * (its total switch count). If a second call a short while later returns the
 * same number, the caller can be sure that @p has remained unscheduled the
 * whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
	int running, queued, match;
	struct rq_flags rf;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_on_cpu()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_on_cpu(rq, p)) {
			if (!task_state_match(p, match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
		running = task_on_cpu(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if ((match = __task_state_match(p, match_state))) {
			/*
			 * When matching on p->saved_state, consider this task
			 * still queued so it will wait.
			 */
			if (match < 0)
				queued = 1;
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		}
		task_rq_unlock(rq, p, &rf);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = NSEC_PER_SEC / HZ;

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
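
/*
 * Illustrative caller pattern (a sketch, not code used in this file): call
 * wait_task_inactive() twice and compare the returned switch counts to check
 * that a task stayed off the CPU for the whole interval:
 *
 *	unsigned long ncsw, ncsw2;
 *
 *	ncsw  = wait_task_inactive(p, TASK_TRACED);
 *	...
 *	ncsw2 = wait_task_inactive(p, TASK_TRACED);
 *
 * A non-zero ncsw with ncsw2 == ncsw says @p never ran in between; a zero
 * return means @p changed state (e.g. woke up) before it ever blocked.
 */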
static void
__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);

static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
	struct affinity_context ac = {
		.new_mask  = cpumask_of(rq->cpu),
		.flags     = SCA_MIGRATE_DISABLE,
	};

	if (likely(!p->migration_disabled))
		return;

	if (p->cpus_ptr != &p->cpus_mask)
		return;

	/*
	 * Violates locking rules! See comment in __do_set_cpus_allowed().
	 */
	__do_set_cpus_allowed(p, &ac);
}
void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled) {
#ifdef CONFIG_DEBUG_PREEMPT
		/*
		 * Warn about overflow half-way through the range.
		 */
		WARN_ON_ONCE((s16)p->migration_disabled < 0);
#endif
		p->migration_disabled++;
		return;
	}

	guard(preempt)();
	this_rq()->nr_pinned++;
	p->migration_disabled = 1;
}
EXPORT_SYMBOL_GPL(migrate_disable);
void migrate_enable(void)
{
	struct task_struct *p = current;
	struct affinity_context ac = {
		.new_mask  = &p->cpus_mask,
		.flags     = SCA_MIGRATE_ENABLE,
	};

#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Check both overflow from migrate_disable() and superfluous
	 * migrate_enable().
	 */
	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
		return;
#endif

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
		return;
	}

	/*
	 * Ensure stop_task runs either before or after this, and that
	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
	 */
	guard(preempt)();
	if (p->cpus_ptr != &p->cpus_mask)
		__set_cpus_allowed_ptr(p, &ac);
	/*
	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
	 * regular cpus_mask, otherwise things that race (eg.
	 * select_fallback_rq) get confused.
	 */
	barrier();
	p->migration_disabled = 0;
	this_rq()->nr_pinned--;
}
EXPORT_SYMBOL_GPL(migrate_enable);
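
/*
 * Illustrative usage of the pair above (a sketch, assuming a preemptible
 * context; not code from this file): keep the current task on its CPU across
 * a short section without disabling preemption:
 *
 *	migrate_disable();
 *	...		(work that needs a stable smp_processor_id())
 *	migrate_enable();
 *
 * Unlike a preempt_disable() section, such a region may be preempted and may
 * even sleep; it only guarantees the task will not be migrated to another
 * CPU, which is what the nr_pinned accounting above tracks.
 */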
static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return rq->nr_pinned;
}

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	/* When not in the task's cpumask, no point in looking further. */
	if (!task_allowed_on_cpu(p, cpu))
		return false;

	/* migrate_disabled() must be allowed to finish. */
	if (is_migration_disabled(p))
		return cpu_online(cpu);

	/* Non kernel threads are not allowed during either online or offline. */
	if (!(p->flags & PF_KTHREAD))
		return cpu_active(cpu);

	/* KTHREAD_IS_PER_CPU is always allowed. */
	if (kthread_is_per_cpu(p))
		return cpu_online(cpu);

	/* Regular kernel threads don't get to stay during offline. */
	if (cpu_dying(cpu))
		return false;

	/* But are allowed during online. */
	return cpu_online(cpu);
}
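
/*
 * Rough summary of the rules above (informational only):
 *
 *	task				may run on
 *	----------------------------	--------------------
 *	outside its cpumask		no CPU
 *	migration disabled		online CPUs
 *	regular user task		active CPUs
 *	KTHREAD_IS_PER_CPU kthread	online CPUs
 *	other kthread, CPU dying	no CPU
 *	other kthread			online CPUs
 */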
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	lockdep_assert_rq_held(rq);

	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	WARN_ON_ONCE(task_cpu(p) != new_cpu);
	activate_task(rq, p, 0);
	wakeup_preempt(rq, p, 0);

	return rq;
}
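
/*
 * Note on the locking pattern above: the old rq lock is released before the
 * new rq lock is taken, so this path never holds both at once; the caller
 * continues with the returned, locked, new rq. Paths that already hold both
 * runqueue locks use move_queued_task_locked() instead (see push_cpu_stop()
 * and __migrate_swap_task() below).
 */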
2490 struct migration_arg
{
2491 struct task_struct
*task
;
2493 struct set_affinity_pending
*pending
;
2497 * @refs: number of wait_for_completion()
2498 * @stop_pending: is @stop_work in use
2500 struct set_affinity_pending
{
2502 unsigned int stop_pending
;
2503 struct completion done
;
2504 struct cpu_stop_work stop_work
;
2505 struct migration_arg arg
;
2509 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2510 * this because either it can't run here any more (set_cpus_allowed()
2511 * away from this CPU, or CPU going down), or because we're
2512 * attempting to rebalance this task on exec (sched_exec).
2514 * So we race with normal scheduler movements, but that's OK, as long
2515 * as the task is no longer on this CPU.
2517 static struct rq
*__migrate_task(struct rq
*rq
, struct rq_flags
*rf
,
2518 struct task_struct
*p
, int dest_cpu
)
2520 /* Affinity changed (again). */
2521 if (!is_cpu_allowed(p
, dest_cpu
))
2524 rq
= move_queued_task(rq
, rf
, p
, dest_cpu
);
2530 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2531 * and performs thread migration by bumping thread off CPU then
2532 * 'pushing' onto another runqueue.
2534 static int migration_cpu_stop(void *data
)
2536 struct migration_arg
*arg
= data
;
2537 struct set_affinity_pending
*pending
= arg
->pending
;
2538 struct task_struct
*p
= arg
->task
;
2539 struct rq
*rq
= this_rq();
2540 bool complete
= false;
2544 * The original target CPU might have gone down and we might
2545 * be on another CPU but it doesn't matter.
2547 local_irq_save(rf
.flags
);
2549 * We need to explicitly wake pending tasks before running
2550 * __migrate_task() such that we will not miss enforcing cpus_ptr
2551 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2553 flush_smp_call_function_queue();
2555 raw_spin_lock(&p
->pi_lock
);
2559 * If we were passed a pending, then ->stop_pending was set, thus
2560 * p->migration_pending must have remained stable.
2562 WARN_ON_ONCE(pending
&& pending
!= p
->migration_pending
);
2565 * If task_rq(p) != rq, it cannot be migrated here, because we're
2566 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2567 * we're holding p->pi_lock.
2569 if (task_rq(p
) == rq
) {
2570 if (is_migration_disabled(p
))
2574 p
->migration_pending
= NULL
;
2577 if (cpumask_test_cpu(task_cpu(p
), &p
->cpus_mask
))
2581 if (task_on_rq_queued(p
)) {
2582 update_rq_clock(rq
);
2583 rq
= __migrate_task(rq
, &rf
, p
, arg
->dest_cpu
);
2585 p
->wake_cpu
= arg
->dest_cpu
;
2589 * XXX __migrate_task() can fail, at which point we might end
2590 * up running on a dodgy CPU, AFAICT this can only happen
2591 * during CPU hotplug, at which point we'll get pushed out
2592 * anyway, so it's probably not a big deal.
2595 } else if (pending
) {
2597 * This happens when we get migrated between migrate_enable()'s
2598 * preempt_enable() and scheduling the stopper task. At that
2599 * point we're a regular task again and not current anymore.
2601 * A !PREEMPT kernel has a giant hole here, which makes it far
2606 * The task moved before the stopper got to run. We're holding
2607 * ->pi_lock, so the allowed mask is stable - if it got
2608 * somewhere allowed, we're done.
2610 if (cpumask_test_cpu(task_cpu(p
), p
->cpus_ptr
)) {
2611 p
->migration_pending
= NULL
;
2617 * When migrate_enable() hits a rq mis-match we can't reliably
2618 * determine is_migration_disabled() and so have to chase after
2621 WARN_ON_ONCE(!pending
->stop_pending
);
2623 task_rq_unlock(rq
, p
, &rf
);
2624 stop_one_cpu_nowait(task_cpu(p
), migration_cpu_stop
,
2625 &pending
->arg
, &pending
->stop_work
);
2631 pending
->stop_pending
= false;
2632 task_rq_unlock(rq
, p
, &rf
);
2635 complete_all(&pending
->done
);
2640 int push_cpu_stop(void *arg
)
2642 struct rq
*lowest_rq
= NULL
, *rq
= this_rq();
2643 struct task_struct
*p
= arg
;
2645 raw_spin_lock_irq(&p
->pi_lock
);
2646 raw_spin_rq_lock(rq
);
2648 if (task_rq(p
) != rq
)
2651 if (is_migration_disabled(p
)) {
2652 p
->migration_flags
|= MDF_PUSH
;
2656 p
->migration_flags
&= ~MDF_PUSH
;
2658 if (p
->sched_class
->find_lock_rq
)
2659 lowest_rq
= p
->sched_class
->find_lock_rq(p
, rq
);
2664 // XXX validate p is still the highest prio task
2665 if (task_rq(p
) == rq
) {
2666 move_queued_task_locked(rq
, lowest_rq
, p
);
2667 resched_curr(lowest_rq
);
2670 double_unlock_balance(rq
, lowest_rq
);
2673 rq
->push_busy
= false;
2674 raw_spin_rq_unlock(rq
);
2675 raw_spin_unlock_irq(&p
->pi_lock
);
2682 * sched_class::set_cpus_allowed must do the below, but is not required to
2683 * actually call this function.
2685 void set_cpus_allowed_common(struct task_struct
*p
, struct affinity_context
*ctx
)
2687 if (ctx
->flags
& (SCA_MIGRATE_ENABLE
| SCA_MIGRATE_DISABLE
)) {
2688 p
->cpus_ptr
= ctx
->new_mask
;
2692 cpumask_copy(&p
->cpus_mask
, ctx
->new_mask
);
2693 p
->nr_cpus_allowed
= cpumask_weight(ctx
->new_mask
);
2696 * Swap in a new user_cpus_ptr if SCA_USER flag set
2698 if (ctx
->flags
& SCA_USER
)
2699 swap(p
->user_cpus_ptr
, ctx
->user_mask
);
2703 __do_set_cpus_allowed(struct task_struct
*p
, struct affinity_context
*ctx
)
2705 struct rq
*rq
= task_rq(p
);
2706 bool queued
, running
;
2709 * This here violates the locking rules for affinity, since we're only
2710 * supposed to change these variables while holding both rq->lock and
2713 * HOWEVER, it magically works, because ttwu() is the only code that
2714 * accesses these variables under p->pi_lock and only does so after
2715 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2716 * before finish_task().
2718 * XXX do further audits, this smells like something putrid.
2720 if (ctx
->flags
& SCA_MIGRATE_DISABLE
)
2721 SCHED_WARN_ON(!p
->on_cpu
);
2723 lockdep_assert_held(&p
->pi_lock
);
2725 queued
= task_on_rq_queued(p
);
2726 running
= task_current_donor(rq
, p
);
2730 * Because __kthread_bind() calls this on blocked tasks without
2733 lockdep_assert_rq_held(rq
);
2734 dequeue_task(rq
, p
, DEQUEUE_SAVE
| DEQUEUE_NOCLOCK
);
2737 put_prev_task(rq
, p
);
2739 p
->sched_class
->set_cpus_allowed(p
, ctx
);
2740 mm_set_cpus_allowed(p
->mm
, ctx
->new_mask
);
2743 enqueue_task(rq
, p
, ENQUEUE_RESTORE
| ENQUEUE_NOCLOCK
);
2745 set_next_task(rq
, p
);
2749 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2750 * affinity (if any) should be destroyed too.
2752 void do_set_cpus_allowed(struct task_struct
*p
, const struct cpumask
*new_mask
)
2754 struct affinity_context ac
= {
2755 .new_mask
= new_mask
,
2757 .flags
= SCA_USER
, /* clear the user requested mask */
2759 union cpumask_rcuhead
{
2761 struct rcu_head rcu
;
2764 __do_set_cpus_allowed(p
, &ac
);
2767 * Because this is called with p->pi_lock held, it is not possible
2768 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2771 kfree_rcu((union cpumask_rcuhead
*)ac
.user_mask
, rcu
);
2774 int dup_user_cpus_ptr(struct task_struct
*dst
, struct task_struct
*src
,
2777 cpumask_t
*user_mask
;
2778 unsigned long flags
;
2781 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2782 * may differ by now due to racing.
2784 dst
->user_cpus_ptr
= NULL
;
2787 * This check is racy and losing the race is a valid situation.
2788 * It is not worth the extra overhead of taking the pi_lock on
2791 if (data_race(!src
->user_cpus_ptr
))
2794 user_mask
= alloc_user_cpus_ptr(node
);
2799 * Use pi_lock to protect content of user_cpus_ptr
2801 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2802 * do_set_cpus_allowed().
2804 raw_spin_lock_irqsave(&src
->pi_lock
, flags
);
2805 if (src
->user_cpus_ptr
) {
2806 swap(dst
->user_cpus_ptr
, user_mask
);
2807 cpumask_copy(dst
->user_cpus_ptr
, src
->user_cpus_ptr
);
2809 raw_spin_unlock_irqrestore(&src
->pi_lock
, flags
);
2811 if (unlikely(user_mask
))
2817 static inline struct cpumask
*clear_user_cpus_ptr(struct task_struct
*p
)
2819 struct cpumask
*user_mask
= NULL
;
2821 swap(p
->user_cpus_ptr
, user_mask
);
2826 void release_user_cpus_ptr(struct task_struct
*p
)
2828 kfree(clear_user_cpus_ptr(p
));
2832 * This function is wildly self concurrent; here be dragons.
2835 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2836 * designated task is enqueued on an allowed CPU. If that task is currently
2837 * running, we have to kick it out using the CPU stopper.
2839 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2842 * Initial conditions: P0->cpus_mask = [0, 1]
2846 * migrate_disable();
2848 * set_cpus_allowed_ptr(P0, [1]);
2850 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2851 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2852 * This means we need the following scheme:
2856 * migrate_disable();
2858 * set_cpus_allowed_ptr(P0, [1]);
2862 * __set_cpus_allowed_ptr();
2863 * <wakes local stopper>
2864 * `--> <woken on migration completion>
2866 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2867 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2868 * task p are serialized by p->pi_lock, which we can leverage: the one that
2869 * should come into effect at the end of the Migrate-Disable region is the last
2870 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2871 * but we still need to properly signal those waiting tasks at the appropriate
2874 * This is implemented using struct set_affinity_pending. The first
2875 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2876 * setup an instance of that struct and install it on the targeted task_struct.
2877 * Any and all further callers will reuse that instance. Those then wait for
2878 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2879 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2882 * (1) In the cases covered above. There is one more where the completion is
2883 * signaled within affine_move_task() itself: when a subsequent affinity request
2884 * occurs after the stopper bailed out due to the targeted task still being
2885 * Migrate-Disable. Consider:
2887 * Initial conditions: P0->cpus_mask = [0, 1]
2891 * migrate_disable();
2893 * set_cpus_allowed_ptr(P0, [1]);
2896 * migration_cpu_stop()
2897 * is_migration_disabled()
2899 * set_cpus_allowed_ptr(P0, [0, 1]);
2900 * <signal completion>
2903 * Note that the above is safe vs a concurrent migrate_enable(), as any
2904 * pending affinity completion is preceded by an uninstallation of
2905 * p->migration_pending done with p->pi_lock held.
2907 static int affine_move_task(struct rq
*rq
, struct task_struct
*p
, struct rq_flags
*rf
,
2908 int dest_cpu
, unsigned int flags
)
2909 __releases(rq
->lock
)
2910 __releases(p
->pi_lock
)
2912 struct set_affinity_pending my_pending
= { }, *pending
= NULL
;
2913 bool stop_pending
, complete
= false;
2915 /* Can the task run on the task's current CPU? If so, we're done */
2916 if (cpumask_test_cpu(task_cpu(p
), &p
->cpus_mask
)) {
2917 struct task_struct
*push_task
= NULL
;
2919 if ((flags
& SCA_MIGRATE_ENABLE
) &&
2920 (p
->migration_flags
& MDF_PUSH
) && !rq
->push_busy
) {
2921 rq
->push_busy
= true;
2922 push_task
= get_task_struct(p
);
2926 * If there are pending waiters, but no pending stop_work,
2927 * then complete now.
2929 pending
= p
->migration_pending
;
2930 if (pending
&& !pending
->stop_pending
) {
2931 p
->migration_pending
= NULL
;
2936 task_rq_unlock(rq
, p
, rf
);
2938 stop_one_cpu_nowait(rq
->cpu
, push_cpu_stop
,
2944 complete_all(&pending
->done
);
2949 if (!(flags
& SCA_MIGRATE_ENABLE
)) {
2950 /* serialized by p->pi_lock */
2951 if (!p
->migration_pending
) {
2952 /* Install the request */
2953 refcount_set(&my_pending
.refs
, 1);
2954 init_completion(&my_pending
.done
);
2955 my_pending
.arg
= (struct migration_arg
) {
2957 .dest_cpu
= dest_cpu
,
2958 .pending
= &my_pending
,
2961 p
->migration_pending
= &my_pending
;
2963 pending
= p
->migration_pending
;
2964 refcount_inc(&pending
->refs
);
2966 * Affinity has changed, but we've already installed a
2967 * pending. migration_cpu_stop() *must* see this, else
2968 * we risk a completion of the pending despite having a
2969 * task on a disallowed CPU.
2971 * Serialized by p->pi_lock, so this is safe.
2973 pending
->arg
.dest_cpu
= dest_cpu
;
2976 pending
= p
->migration_pending
;
2978 * - !MIGRATE_ENABLE:
2979 * we'll have installed a pending if there wasn't one already.
2982 * we're here because the current CPU isn't matching anymore,
2983 * the only way that can happen is because of a concurrent
2984 * set_cpus_allowed_ptr() call, which should then still be
2985 * pending completion.
2987 * Either way, we really should have a @pending here.
2989 if (WARN_ON_ONCE(!pending
)) {
2990 task_rq_unlock(rq
, p
, rf
);
2994 if (task_on_cpu(rq
, p
) || READ_ONCE(p
->__state
) == TASK_WAKING
) {
2996 * MIGRATE_ENABLE gets here because 'p == current', but for
2997 * anything else we cannot do is_migration_disabled(), punt
2998 * and have the stopper function handle it all race-free.
3000 stop_pending
= pending
->stop_pending
;
3002 pending
->stop_pending
= true;
3004 if (flags
& SCA_MIGRATE_ENABLE
)
3005 p
->migration_flags
&= ~MDF_PUSH
;
3008 task_rq_unlock(rq
, p
, rf
);
3009 if (!stop_pending
) {
3010 stop_one_cpu_nowait(cpu_of(rq
), migration_cpu_stop
,
3011 &pending
->arg
, &pending
->stop_work
);
3015 if (flags
& SCA_MIGRATE_ENABLE
)
3019 if (!is_migration_disabled(p
)) {
3020 if (task_on_rq_queued(p
))
3021 rq
= move_queued_task(rq
, rf
, p
, dest_cpu
);
3023 if (!pending
->stop_pending
) {
3024 p
->migration_pending
= NULL
;
3028 task_rq_unlock(rq
, p
, rf
);
3031 complete_all(&pending
->done
);
3034 wait_for_completion(&pending
->done
);
3036 if (refcount_dec_and_test(&pending
->refs
))
3037 wake_up_var(&pending
->refs
); /* No UaF, just an address */
3040 * Block the original owner of &pending until all subsequent callers
3041 * have seen the completion and decremented the refcount
3043 wait_var_event(&my_pending
.refs
, !refcount_read(&my_pending
.refs
));
3046 WARN_ON_ONCE(my_pending
.stop_pending
);
3052 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3054 static int __set_cpus_allowed_ptr_locked(struct task_struct
*p
,
3055 struct affinity_context
*ctx
,
3057 struct rq_flags
*rf
)
3058 __releases(rq
->lock
)
3059 __releases(p
->pi_lock
)
3061 const struct cpumask
*cpu_allowed_mask
= task_cpu_possible_mask(p
);
3062 const struct cpumask
*cpu_valid_mask
= cpu_active_mask
;
3063 bool kthread
= p
->flags
& PF_KTHREAD
;
3064 unsigned int dest_cpu
;
3067 update_rq_clock(rq
);
3069 if (kthread
|| is_migration_disabled(p
)) {
3071 * Kernel threads are allowed on online && !active CPUs,
3072 * however, during cpu-hot-unplug, even these might get pushed
3073 * away if not KTHREAD_IS_PER_CPU.
3075 * Specifically, migration_disabled() tasks must not fail the
3076 * cpumask_any_and_distribute() pick below, esp. so on
3077 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3078 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3080 cpu_valid_mask
= cpu_online_mask
;
3083 if (!kthread
&& !cpumask_subset(ctx
->new_mask
, cpu_allowed_mask
)) {
3089 * Must re-check here, to close a race against __kthread_bind(),
3090 * sched_setaffinity() is not guaranteed to observe the flag.
3092 if ((ctx
->flags
& SCA_CHECK
) && (p
->flags
& PF_NO_SETAFFINITY
)) {
3097 if (!(ctx
->flags
& SCA_MIGRATE_ENABLE
)) {
3098 if (cpumask_equal(&p
->cpus_mask
, ctx
->new_mask
)) {
3099 if (ctx
->flags
& SCA_USER
)
3100 swap(p
->user_cpus_ptr
, ctx
->user_mask
);
3104 if (WARN_ON_ONCE(p
== current
&&
3105 is_migration_disabled(p
) &&
3106 !cpumask_test_cpu(task_cpu(p
), ctx
->new_mask
))) {
3113 * Picking a ~random cpu helps in cases where we are changing affinity
3114 * for groups of tasks (ie. cpuset), so that load balancing is not
3115 * immediately required to distribute the tasks within their new mask.
3117 dest_cpu
= cpumask_any_and_distribute(cpu_valid_mask
, ctx
->new_mask
);
3118 if (dest_cpu
>= nr_cpu_ids
) {
3123 __do_set_cpus_allowed(p
, ctx
);
3125 return affine_move_task(rq
, p
, rf
, dest_cpu
, ctx
->flags
);
3128 task_rq_unlock(rq
, p
, rf
);
/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	/*
	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
	 * flags are set.
	 */
	if (p->user_cpus_ptr &&
	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
		ctx->new_mask = rq->scratch_mask;

	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
}
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	struct affinity_context ac = {
		.new_mask  = new_mask,
		.flags     = 0,
	};

	return __set_cpus_allowed_ptr(p, &ac);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
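
/*
 * Illustrative use of the exported helper above (a sketch, not code from
 * this file): a driver pinning a worker kthread - here a hypothetical
 * 'tsk' and 'cpu' - to a single CPU:
 *
 *	if (set_cpus_allowed_ptr(tsk, cpumask_of(cpu)))
 *		...	(fall back to running the worker unpinned)
 *
 * The call can block while the task is migrated off a now-disallowed CPU,
 * so it must not be made from atomic context (see the NOTE above
 * __set_cpus_allowed_ptr()).
 */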
3172 * Change a given task's CPU affinity to the intersection of its current
3173 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3174 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3175 * affinity or use cpu_online_mask instead.
3177 * If the resulting mask is empty, leave the affinity unchanged and return
3180 static int restrict_cpus_allowed_ptr(struct task_struct
*p
,
3181 struct cpumask
*new_mask
,
3182 const struct cpumask
*subset_mask
)
3184 struct affinity_context ac
= {
3185 .new_mask
= new_mask
,
3192 rq
= task_rq_lock(p
, &rf
);
3195 * Forcefully restricting the affinity of a deadline task is
3196 * likely to cause problems, so fail and noisily override the
3199 if (task_has_dl_policy(p
) && dl_bandwidth_enabled()) {
3204 if (!cpumask_and(new_mask
, task_user_cpus(p
), subset_mask
)) {
3209 return __set_cpus_allowed_ptr_locked(p
, &ac
, rq
, &rf
);
3212 task_rq_unlock(rq
, p
, &rf
);
3217 * Restrict the CPU affinity of task @p so that it is a subset of
3218 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3219 * old affinity mask. If the resulting mask is empty, we warn and walk
3220 * up the cpuset hierarchy until we find a suitable mask.
3222 void force_compatible_cpus_allowed_ptr(struct task_struct
*p
)
3224 cpumask_var_t new_mask
;
3225 const struct cpumask
*override_mask
= task_cpu_possible_mask(p
);
3227 alloc_cpumask_var(&new_mask
, GFP_KERNEL
);
3230 * __migrate_task() can fail silently in the face of concurrent
3231 * offlining of the chosen destination CPU, so take the hotplug
3232 * lock to ensure that the migration succeeds.
3235 if (!cpumask_available(new_mask
))
3238 if (!restrict_cpus_allowed_ptr(p
, new_mask
, override_mask
))
3242 * We failed to find a valid subset of the affinity mask for the
3243 * task, so override it based on its cpuset hierarchy.
3245 cpuset_cpus_allowed(p
, new_mask
);
3246 override_mask
= new_mask
;
3249 if (printk_ratelimit()) {
3250 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3251 task_pid_nr(p
), p
->comm
,
3252 cpumask_pr_args(override_mask
));
3255 WARN_ON(set_cpus_allowed_ptr(p
, override_mask
));
3258 free_cpumask_var(new_mask
);
3262 * Restore the affinity of a task @p which was previously restricted by a
3263 * call to force_compatible_cpus_allowed_ptr().
3265 * It is the caller's responsibility to serialise this with any calls to
3266 * force_compatible_cpus_allowed_ptr(@p).
3268 void relax_compatible_cpus_allowed_ptr(struct task_struct
*p
)
3270 struct affinity_context ac
= {
3271 .new_mask
= task_user_cpus(p
),
3277 * Try to restore the old affinity mask with __sched_setaffinity().
3278 * Cpuset masking will be done there too.
3280 ret
= __sched_setaffinity(p
, &ac
);
3284 void set_task_cpu(struct task_struct
*p
, unsigned int new_cpu
)
3286 #ifdef CONFIG_SCHED_DEBUG
3287 unsigned int state
= READ_ONCE(p
->__state
);
3290 * We should never call set_task_cpu() on a blocked task,
3291 * ttwu() will sort out the placement.
3293 WARN_ON_ONCE(state
!= TASK_RUNNING
&& state
!= TASK_WAKING
&& !p
->on_rq
);
3296 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3297 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3298 * time relying on p->on_rq.
3300 WARN_ON_ONCE(state
== TASK_RUNNING
&&
3301 p
->sched_class
== &fair_sched_class
&&
3302 (p
->on_rq
&& !task_on_rq_migrating(p
)));
3304 #ifdef CONFIG_LOCKDEP
3306 * The caller should hold either p->pi_lock or rq->lock, when changing
3307 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3309 * sched_move_task() holds both and thus holding either pins the cgroup,
3312 * Furthermore, all task_rq users should acquire both locks, see
3315 WARN_ON_ONCE(debug_locks
&& !(lockdep_is_held(&p
->pi_lock
) ||
3316 lockdep_is_held(__rq_lockp(task_rq(p
)))));
3319 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3321 WARN_ON_ONCE(!cpu_online(new_cpu
));
3323 WARN_ON_ONCE(is_migration_disabled(p
));
3326 trace_sched_migrate_task(p
, new_cpu
);
3328 if (task_cpu(p
) != new_cpu
) {
3329 if (p
->sched_class
->migrate_task_rq
)
3330 p
->sched_class
->migrate_task_rq(p
, new_cpu
);
3331 p
->se
.nr_migrations
++;
3333 sched_mm_cid_migrate_from(p
);
3334 perf_event_task_migrate(p
);
3337 __set_task_cpu(p
, new_cpu
);
3340 #ifdef CONFIG_NUMA_BALANCING
3341 static void __migrate_swap_task(struct task_struct
*p
, int cpu
)
3343 if (task_on_rq_queued(p
)) {
3344 struct rq
*src_rq
, *dst_rq
;
3345 struct rq_flags srf
, drf
;
3347 src_rq
= task_rq(p
);
3348 dst_rq
= cpu_rq(cpu
);
3350 rq_pin_lock(src_rq
, &srf
);
3351 rq_pin_lock(dst_rq
, &drf
);
3353 move_queued_task_locked(src_rq
, dst_rq
, p
);
3354 wakeup_preempt(dst_rq
, p
, 0);
3356 rq_unpin_lock(dst_rq
, &drf
);
3357 rq_unpin_lock(src_rq
, &srf
);
3361 * Task isn't running anymore; make it appear like we migrated
3362 * it before it went to sleep. This means on wakeup we make the
3363 * previous CPU our target instead of where it really is.
3369 struct migration_swap_arg
{
3370 struct task_struct
*src_task
, *dst_task
;
3371 int src_cpu
, dst_cpu
;
3374 static int migrate_swap_stop(void *data
)
3376 struct migration_swap_arg
*arg
= data
;
3377 struct rq
*src_rq
, *dst_rq
;
3379 if (!cpu_active(arg
->src_cpu
) || !cpu_active(arg
->dst_cpu
))
3382 src_rq
= cpu_rq(arg
->src_cpu
);
3383 dst_rq
= cpu_rq(arg
->dst_cpu
);
3385 guard(double_raw_spinlock
)(&arg
->src_task
->pi_lock
, &arg
->dst_task
->pi_lock
);
3386 guard(double_rq_lock
)(src_rq
, dst_rq
);
3388 if (task_cpu(arg
->dst_task
) != arg
->dst_cpu
)
3391 if (task_cpu(arg
->src_task
) != arg
->src_cpu
)
3394 if (!cpumask_test_cpu(arg
->dst_cpu
, arg
->src_task
->cpus_ptr
))
3397 if (!cpumask_test_cpu(arg
->src_cpu
, arg
->dst_task
->cpus_ptr
))
3400 __migrate_swap_task(arg
->src_task
, arg
->dst_cpu
);
3401 __migrate_swap_task(arg
->dst_task
, arg
->src_cpu
);
3407 * Cross migrate two tasks
3409 int migrate_swap(struct task_struct
*cur
, struct task_struct
*p
,
3410 int target_cpu
, int curr_cpu
)
3412 struct migration_swap_arg arg
;
3415 arg
= (struct migration_swap_arg
){
3417 .src_cpu
= curr_cpu
,
3419 .dst_cpu
= target_cpu
,
3422 if (arg
.src_cpu
== arg
.dst_cpu
)
3426 * These three tests are all lockless; this is OK since all of them
3427 * will be re-checked with proper locks held further down the line.
3429 if (!cpu_active(arg
.src_cpu
) || !cpu_active(arg
.dst_cpu
))
3432 if (!cpumask_test_cpu(arg
.dst_cpu
, arg
.src_task
->cpus_ptr
))
3435 if (!cpumask_test_cpu(arg
.src_cpu
, arg
.dst_task
->cpus_ptr
))
3438 trace_sched_swap_numa(cur
, arg
.src_cpu
, p
, arg
.dst_cpu
);
3439 ret
= stop_two_cpus(arg
.dst_cpu
, arg
.src_cpu
, migrate_swap_stop
, &arg
);
3444 #endif /* CONFIG_NUMA_BALANCING */
/**
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	guard(preempt)();
	int cpu = task_cpu(p);

	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
}
EXPORT_SYMBOL_GPL(kick_process);
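
/*
 * Illustrative use (a sketch, not code from this file): after setting a
 * per-task flag that the target must act on promptly, force it through the
 * kernel so the flag is noticed without waiting for the next natural entry:
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	kick_process(p);
 *
 * If @p is not currently running, the kick is skipped and the flag is seen
 * whenever @p is next scheduled in.
 */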
3470 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3472 * A few notes on cpu_active vs cpu_online:
3474 * - cpu_active must be a subset of cpu_online
3476 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3477 * see __set_cpus_allowed_ptr(). At this point the newly online
3478 * CPU isn't yet part of the sched domains, and balancing will not
3481 * - on CPU-down we clear cpu_active() to mask the sched domains and
3482 * avoid the load balancer to place new tasks on the to be removed
3483 * CPU. Existing tasks will remain running there and will be taken
3486 * This means that fallback selection must not select !active CPUs.
3487 * And can assume that any active CPU must be online. Conversely
3488 * select_task_rq() below may allow selection of !active CPUs in order
3489 * to satisfy the above rules.
3491 static int select_fallback_rq(int cpu
, struct task_struct
*p
)
3493 int nid
= cpu_to_node(cpu
);
3494 const struct cpumask
*nodemask
= NULL
;
3495 enum { cpuset
, possible
, fail
} state
= cpuset
;
3499 * If the node that the CPU is on has been offlined, cpu_to_node()
3500 * will return -1. There is no CPU on the node, and we should
3501 * select the CPU on the other node.
3504 nodemask
= cpumask_of_node(nid
);
3506 /* Look for allowed, online CPU in same node. */
3507 for_each_cpu(dest_cpu
, nodemask
) {
3508 if (is_cpu_allowed(p
, dest_cpu
))
3514 /* Any allowed, online CPU? */
3515 for_each_cpu(dest_cpu
, p
->cpus_ptr
) {
3516 if (!is_cpu_allowed(p
, dest_cpu
))
3522 /* No more Mr. Nice Guy. */
3525 if (cpuset_cpus_allowed_fallback(p
)) {
3532 * XXX When called from select_task_rq() we only
3533 * hold p->pi_lock and again violate locking order.
3535 * More yuck to audit.
3537 do_set_cpus_allowed(p
, task_cpu_possible_mask(p
));
3547 if (state
!= cpuset
) {
3549 * Don't tell them about moving exiting tasks or
3550 * kernel threads (both mm NULL), since they never
3553 if (p
->mm
&& printk_ratelimit()) {
3554 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3555 task_pid_nr(p
), p
->comm
, cpu
);
/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
 */
int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
		cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
		*wake_flags |= WF_RQ_SELECTED;
	} else {
		cpu = cpumask_any(p->cpus_ptr);
	}

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
	 * CPU.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!is_cpu_allowed(p, cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}
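
/*
 * The WF_RQ_SELECTED flag set above is consumed in ttwu_do_activate(), where
 * it is turned into ENQUEUE_RQ_SELECTED so the enqueue path knows that the
 * class' select_task_rq() was actually consulted for this wakeup.
 */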
3593 void sched_set_stop_task(int cpu
, struct task_struct
*stop
)
3595 static struct lock_class_key stop_pi_lock
;
3596 struct sched_param param
= { .sched_priority
= MAX_RT_PRIO
- 1 };
3597 struct task_struct
*old_stop
= cpu_rq(cpu
)->stop
;
3601 * Make it appear like a SCHED_FIFO task, its something
3602 * userspace knows about and won't get confused about.
3604 * Also, it will make PI more or less work without too
3605 * much confusion -- but then, stop work should not
3606 * rely on PI working anyway.
3608 sched_setscheduler_nocheck(stop
, SCHED_FIFO
, ¶m
);
3610 stop
->sched_class
= &stop_sched_class
;
3613 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3614 * adjust the effective priority of a task. As a result,
3615 * rt_mutex_setprio() can trigger (RT) balancing operations,
3616 * which can then trigger wakeups of the stop thread to push
3617 * around the current task.
3619 * The stop task itself will never be part of the PI-chain, it
3620 * never blocks, therefore that ->pi_lock recursion is safe.
3621 * Tell lockdep about this by placing the stop->pi_lock in its
3624 lockdep_set_class(&stop
->pi_lock
, &stop_pi_lock
);
3627 cpu_rq(cpu
)->stop
= stop
;
3631 * Reset it back to a normal scheduling class so that
3632 * it can die in pieces.
3634 old_stop
->sched_class
= &rt_sched_class
;
3638 #else /* CONFIG_SMP */
3640 static inline void migrate_disable_switch(struct rq
*rq
, struct task_struct
*p
) { }
3642 static inline bool rq_has_pinned_tasks(struct rq
*rq
)
3647 #endif /* !CONFIG_SMP */
3650 ttwu_stat(struct task_struct
*p
, int cpu
, int wake_flags
)
3654 if (!schedstat_enabled())
3660 if (cpu
== rq
->cpu
) {
3661 __schedstat_inc(rq
->ttwu_local
);
3662 __schedstat_inc(p
->stats
.nr_wakeups_local
);
3664 struct sched_domain
*sd
;
3666 __schedstat_inc(p
->stats
.nr_wakeups_remote
);
3669 for_each_domain(rq
->cpu
, sd
) {
3670 if (cpumask_test_cpu(cpu
, sched_domain_span(sd
))) {
3671 __schedstat_inc(sd
->ttwu_wake_remote
);
3677 if (wake_flags
& WF_MIGRATED
)
3678 __schedstat_inc(p
->stats
.nr_wakeups_migrate
);
3679 #endif /* CONFIG_SMP */
3681 __schedstat_inc(rq
->ttwu_count
);
3682 __schedstat_inc(p
->stats
.nr_wakeups
);
3684 if (wake_flags
& WF_SYNC
)
3685 __schedstat_inc(p
->stats
.nr_wakeups_sync
);
3689 * Mark the task runnable.
3691 static inline void ttwu_do_wakeup(struct task_struct
*p
)
3693 WRITE_ONCE(p
->__state
, TASK_RUNNING
);
3694 trace_sched_wakeup(p
);
3698 ttwu_do_activate(struct rq
*rq
, struct task_struct
*p
, int wake_flags
,
3699 struct rq_flags
*rf
)
3701 int en_flags
= ENQUEUE_WAKEUP
| ENQUEUE_NOCLOCK
;
3703 lockdep_assert_rq_held(rq
);
3705 if (p
->sched_contributes_to_load
)
3706 rq
->nr_uninterruptible
--;
3709 if (wake_flags
& WF_RQ_SELECTED
)
3710 en_flags
|= ENQUEUE_RQ_SELECTED
;
3711 if (wake_flags
& WF_MIGRATED
)
3712 en_flags
|= ENQUEUE_MIGRATED
;
3716 delayacct_blkio_end(p
);
3717 atomic_dec(&task_rq(p
)->nr_iowait
);
3720 activate_task(rq
, p
, en_flags
);
3721 wakeup_preempt(rq
, p
, wake_flags
);
3726 if (p
->sched_class
->task_woken
) {
3728 * Our task @p is fully woken up and running; so it's safe to
3729 * drop the rq->lock, hereafter rq is only used for statistics.
3731 rq_unpin_lock(rq
, rf
);
3732 p
->sched_class
->task_woken(rq
, p
);
3733 rq_repin_lock(rq
, rf
);
3736 if (rq
->idle_stamp
) {
3737 u64 delta
= rq_clock(rq
) - rq
->idle_stamp
;
3738 u64 max
= 2*rq
->max_idle_balance_cost
;
3740 update_avg(&rq
->avg_idle
, delta
);
3742 if (rq
->avg_idle
> max
)
3751 * Consider @p being inside a wait loop:
3754 * set_current_state(TASK_UNINTERRUPTIBLE);
3761 * __set_current_state(TASK_RUNNING);
3763 * between set_current_state() and schedule(). In this case @p is still
3764 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3767 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3768 * then schedule() must still happen and p->state can be changed to
3769 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3770 * need to do a full wakeup with enqueue.
3772 * Returns: %true when the wakeup is done,
3775 static int ttwu_runnable(struct task_struct
*p
, int wake_flags
)
3781 rq
= __task_rq_lock(p
, &rf
);
3782 if (task_on_rq_queued(p
)) {
3783 update_rq_clock(rq
);
3784 if (p
->se
.sched_delayed
)
3785 enqueue_task(rq
, p
, ENQUEUE_NOCLOCK
| ENQUEUE_DELAYED
);
3786 if (!task_on_cpu(rq
, p
)) {
3788 * When on_rq && !on_cpu the task is preempted, see if
3789 * it should preempt the task that is current now.
3791 wakeup_preempt(rq
, p
, wake_flags
);
3796 __task_rq_unlock(rq
, &rf
);
3802 void sched_ttwu_pending(void *arg
)
3804 struct llist_node
*llist
= arg
;
3805 struct rq
*rq
= this_rq();
3806 struct task_struct
*p
, *t
;
3812 rq_lock_irqsave(rq
, &rf
);
3813 update_rq_clock(rq
);
3815 llist_for_each_entry_safe(p
, t
, llist
, wake_entry
.llist
) {
3816 if (WARN_ON_ONCE(p
->on_cpu
))
3817 smp_cond_load_acquire(&p
->on_cpu
, !VAL
);
3819 if (WARN_ON_ONCE(task_cpu(p
) != cpu_of(rq
)))
3820 set_task_cpu(p
, cpu_of(rq
));
3822 ttwu_do_activate(rq
, p
, p
->sched_remote_wakeup
? WF_MIGRATED
: 0, &rf
);
3826 * Must be after enqueueing at least once task such that
3827 * idle_cpu() does not observe a false-negative -- if it does,
3828 * it is possible for select_idle_siblings() to stack a number
3829 * of tasks on this CPU during that window.
3831 * It is OK to clear ttwu_pending when another task pending.
3832 * We will receive IPI after local IRQ enabled and then enqueue it.
3833 * Since now nr_running > 0, idle_cpu() will always get correct result.
3835 WRITE_ONCE(rq
->ttwu_pending
, 0);
3836 rq_unlock_irqrestore(rq
, &rf
);
3840 * Prepare the scene for sending an IPI for a remote smp_call
3842 * Returns true if the caller can proceed with sending the IPI.
3843 * Returns false otherwise.
3845 bool call_function_single_prep_ipi(int cpu
)
3847 if (set_nr_if_polling(cpu_rq(cpu
)->idle
)) {
3848 trace_sched_wake_idle_without_ipi(cpu
);
3856 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3857 * necessary. The wakee CPU on receipt of the IPI will queue the task
3858 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3859 * of the wakeup instead of the waker.
3861 static void __ttwu_queue_wakelist(struct task_struct
*p
, int cpu
, int wake_flags
)
3863 struct rq
*rq
= cpu_rq(cpu
);
3865 p
->sched_remote_wakeup
= !!(wake_flags
& WF_MIGRATED
);
3867 WRITE_ONCE(rq
->ttwu_pending
, 1);
3868 __smp_call_single_queue(cpu
, &p
->wake_entry
.llist
);
3871 void wake_up_if_idle(int cpu
)
3873 struct rq
*rq
= cpu_rq(cpu
);
3876 if (is_idle_task(rcu_dereference(rq
->curr
))) {
3877 guard(rq_lock_irqsave
)(rq
);
3878 if (is_idle_task(rq
->curr
))
bool cpus_equal_capacity(int this_cpu, int that_cpu)
{
	if (!sched_asym_cpucap_active())
		return true;

	if (this_cpu == that_cpu)
		return true;

	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	if (this_cpu == that_cpu)
		return true;

	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}

/*
 * Whether CPUs share cache resources, which means LLC on non-cluster
 * machines and LLC tag or L2 on machines with clusters.
 */
bool cpus_share_resources(int this_cpu, int that_cpu)
{
	if (this_cpu == that_cpu)
		return true;

	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
}
3914 static inline bool ttwu_queue_cond(struct task_struct
*p
, int cpu
)
3917 * The BPF scheduler may depend on select_task_rq() being invoked during
3918 * wakeups. In addition, @p may end up executing on a different CPU
3919 * regardless of what happens in the wakeup path making the ttwu_queue
3920 * optimization less meaningful. Skip if on SCX.
3926 * Do not complicate things with the async wake_list while the CPU is
3929 if (!cpu_active(cpu
))
3932 /* Ensure the task will still be allowed to run on the CPU. */
3933 if (!cpumask_test_cpu(cpu
, p
->cpus_ptr
))
3937 * If the CPU does not share cache, then queue the task on the
3938 * remote rqs wakelist to avoid accessing remote data.
3940 if (!cpus_share_cache(smp_processor_id(), cpu
))
3943 if (cpu
== smp_processor_id())
3947 * If the wakee cpu is idle, or the task is descheduling and the
3948 * only running task on the CPU, then use the wakelist to offload
3949 * the task activation to the idle (or soon-to-be-idle) CPU as
3950 * the current CPU is likely busy. nr_running is checked to
3951 * avoid unnecessary task stacking.
3953 * Note that we can only get here with (wakee) p->on_rq=0,
3954 * p->on_cpu can be whatever, we've done the dequeue, so
3955 * the wakee has been accounted out of ->nr_running.
3957 if (!cpu_rq(cpu
)->nr_running
)
3963 static bool ttwu_queue_wakelist(struct task_struct
*p
, int cpu
, int wake_flags
)
3965 if (sched_feat(TTWU_QUEUE
) && ttwu_queue_cond(p
, cpu
)) {
3966 sched_clock_cpu(cpu
); /* Sync clocks across CPUs */
3967 __ttwu_queue_wakelist(p
, cpu
, wake_flags
);
3974 #else /* !CONFIG_SMP */
3976 static inline bool ttwu_queue_wakelist(struct task_struct
*p
, int cpu
, int wake_flags
)
3981 #endif /* CONFIG_SMP */
3983 static void ttwu_queue(struct task_struct
*p
, int cpu
, int wake_flags
)
3985 struct rq
*rq
= cpu_rq(cpu
);
3988 if (ttwu_queue_wakelist(p
, cpu
, wake_flags
))
3992 update_rq_clock(rq
);
3993 ttwu_do_activate(rq
, p
, wake_flags
, &rf
);
3998 * Invoked from try_to_wake_up() to check whether the task can be woken up.
4000 * The caller holds p::pi_lock if p != current or has preemption
4001 * disabled when p == current.
4003 * The rules of saved_state:
4005 * The related locking code always holds p::pi_lock when updating
4006 * p::saved_state, which means the code is fully serialized in both cases.
4008 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4009 * No other bits set. This allows to distinguish all wakeup scenarios.
4011 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
4012 * allows us to prevent early wakeup of tasks before they can be run on
4013 * asymmetric ISA architectures (eg ARMv9).
4015 static __always_inline
4016 bool ttwu_state_match(struct task_struct
*p
, unsigned int state
, int *success
)
4020 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT
)) {
4021 WARN_ON_ONCE((state
& TASK_RTLOCK_WAIT
) &&
4022 state
!= TASK_RTLOCK_WAIT
);
4025 *success
= !!(match
= __task_state_match(p
, state
));
4028 * Saved state preserves the task state across blocking on
4029 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
4030 * set p::saved_state to TASK_RUNNING, but do not wake the task
4031 * because it waits for a lock wakeup or __thaw_task(). Also
4032 * indicate success because from the regular waker's point of
4033 * view this has succeeded.
4035 * After acquiring the lock the task will restore p::__state
4036 * from p::saved_state which ensures that the regular
4037 * wakeup is not lost. The restore will also set
4038 * p::saved_state to TASK_RUNNING so any further tests will
4039 * not result in false positives vs. @success
4042 p
->saved_state
= TASK_RUNNING
;
4048 * Notes on Program-Order guarantees on SMP systems.
4052 * The basic program-order guarantee on SMP systems is that when a task [t]
4053 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4054 * execution on its new CPU [c1].
4056 * For migration (of runnable tasks) this is provided by the following means:
4058 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4059 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4060 * rq(c1)->lock (if not at the same time, then in that order).
4061 * C) LOCK of the rq(c1)->lock scheduling in task
4063 * Release/acquire chaining guarantees that B happens after A and C after B.
4064 * Note: the CPU doing B need not be c0 or c1
4073 * UNLOCK rq(0)->lock
4075 * LOCK rq(0)->lock // orders against CPU0
4077 * UNLOCK rq(0)->lock
4081 * UNLOCK rq(1)->lock
4083 * LOCK rq(1)->lock // orders against CPU2
4086 * UNLOCK rq(1)->lock
4089 * BLOCKING -- aka. SLEEP + WAKEUP
4091 * For blocking we (obviously) need to provide the same guarantee as for
4092 * migration. However the means are completely different as there is no lock
4093 * chain to provide order. Instead we do:
4095 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4096 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4100 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4102 * LOCK rq(0)->lock LOCK X->pi_lock
4105 * smp_store_release(X->on_cpu, 0);
4107 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4113 * X->state = RUNNING
4114 * UNLOCK rq(2)->lock
4116 * LOCK rq(2)->lock // orders against CPU1
4119 * UNLOCK rq(2)->lock
4122 * UNLOCK rq(0)->lock
4125 * However, for wakeups there is a second guarantee we must provide, namely we
4126 * must ensure that CONDITION=1 done by the caller can not be reordered with
4127 * accesses to the task state; see try_to_wake_up() and set_current_state().
4131 * try_to_wake_up - wake up a thread
4132 * @p: the thread to be awakened
4133 * @state: the mask of task states that can be woken
4134 * @wake_flags: wake modifier flags (WF_*)
4136 * Conceptually does:
4138 * If (@state & @p->state) @p->state = TASK_RUNNING.
4140 * If the task was not queued/runnable, also place it back on a runqueue.
4142 * This function is atomic against schedule() which would dequeue the task.
4144 * It issues a full memory barrier before accessing @p->state, see the comment
4145 * with set_current_state().
4147 * Uses p->pi_lock to serialize against concurrent wake-ups.
4149 * Relies on p->pi_lock stabilizing:
4152 * - p->sched_task_group
4153 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4155 * Tries really hard to only take one task_rq(p)->lock for performance.
4156 * Takes rq->lock in:
4157 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4158 * - ttwu_queue() -- new rq, for enqueue of the task;
4159 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4161 * As a consequence we race really badly with just about everything. See the
4162 * many memory barriers and their comments for details.
4164 * Return: %true if @p->state changes (an actual wakeup was done),
4167 int try_to_wake_up(struct task_struct
*p
, unsigned int state
, int wake_flags
)
4170 int cpu
, success
= 0;
4172 wake_flags
|= WF_TTWU
;
4176 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4177 * == smp_processor_id()'. Together this means we can special
4178 * case the whole 'p->on_rq && ttwu_runnable()' case below
4179 * without taking any locks.
4181 * Specifically, given current runs ttwu() we must be before
4182 * schedule()'s block_task(), as such this must not observe
4186 * - we rely on Program-Order guarantees for all the ordering,
4187 * - we're serialized against set_special_state() by virtue of
4188 * it disabling IRQs (this allows not taking ->pi_lock).
4190 SCHED_WARN_ON(p
->se
.sched_delayed
);
4191 if (!ttwu_state_match(p
, state
, &success
))
4194 trace_sched_waking(p
);
4200 * If we are going to wake up a thread waiting for CONDITION we
4201 * need to ensure that CONDITION=1 done by the caller can not be
4202 * reordered with p->state check below. This pairs with smp_store_mb()
4203 * in set_current_state() that the waiting thread does.
4205 scoped_guard (raw_spinlock_irqsave
, &p
->pi_lock
) {
4206 smp_mb__after_spinlock();
4207 if (!ttwu_state_match(p
, state
, &success
))
4210 trace_sched_waking(p
);
4213 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4214 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4215 * in smp_cond_load_acquire() below.
4217 * sched_ttwu_pending() try_to_wake_up()
4218 * STORE p->on_rq = 1 LOAD p->state
4221 * __schedule() (switch to task 'p')
4222 * LOCK rq->lock smp_rmb();
4223 * smp_mb__after_spinlock();
4227 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4229 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4230 * __schedule(). See the comment for smp_mb__after_spinlock().
4232 * A similar smp_rmb() lives in __task_needs_rq_lock().
4235 if (READ_ONCE(p
->on_rq
) && ttwu_runnable(p
, wake_flags
))
4240 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4241 * possible to, falsely, observe p->on_cpu == 0.
4243 * One must be running (->on_cpu == 1) in order to remove oneself
4244 * from the runqueue.
4246 * __schedule() (switch to task 'p') try_to_wake_up()
4247 * STORE p->on_cpu = 1 LOAD p->on_rq
4250 * __schedule() (put 'p' to sleep)
4251 * LOCK rq->lock smp_rmb();
4252 * smp_mb__after_spinlock();
4253 * STORE p->on_rq = 0 LOAD p->on_cpu
4255 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4256 * __schedule(). See the comment for smp_mb__after_spinlock().
4258 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4259 * schedule()'s deactivate_task() has 'happened' and p will no longer
4260 * care about it's own p->state. See the comment in __schedule().
4262 smp_acquire__after_ctrl_dep();
4265 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4266 * == 0), which means we need to do an enqueue, change p->state to
4267 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4268 * enqueue, such as ttwu_queue_wakelist().
4270 WRITE_ONCE(p
->__state
, TASK_WAKING
);
4273 * If the owning (remote) CPU is still in the middle of schedule() with
4274 * this task as prev, considering queueing p on the remote CPUs wake_list
4275 * which potentially sends an IPI instead of spinning on p->on_cpu to
4276 * let the waker make forward progress. This is safe because IRQs are
4277 * disabled and the IPI will deliver after on_cpu is cleared.
4279 * Ensure we load task_cpu(p) after p->on_cpu:
4281 * set_task_cpu(p, cpu);
4282 * STORE p->cpu = @cpu
4283 * __schedule() (switch to task 'p')
4285 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4286 * STORE p->on_cpu = 1 LOAD p->cpu
4288 * to ensure we observe the correct CPU on which the task is currently
4291 if (smp_load_acquire(&p
->on_cpu
) &&
4292 ttwu_queue_wakelist(p
, task_cpu(p
), wake_flags
))
4296 * If the owning (remote) CPU is still in the middle of schedule() with
4297 * this task as prev, wait until it's done referencing the task.
4299 * Pairs with the smp_store_release() in finish_task().
4301 * This ensures that tasks getting woken will be fully ordered against
4302 * their previous state and preserve Program Order.
4304 smp_cond_load_acquire(&p
->on_cpu
, !VAL
);
4306 cpu
= select_task_rq(p
, p
->wake_cpu
, &wake_flags
);
4307 if (task_cpu(p
) != cpu
) {
4309 delayacct_blkio_end(p
);
4310 atomic_dec(&task_rq(p
)->nr_iowait
);
4313 wake_flags
|= WF_MIGRATED
;
4314 psi_ttwu_dequeue(p
);
4315 set_task_cpu(p
, cpu
);
4319 #endif /* CONFIG_SMP */
4321 ttwu_queue(p
, cpu
, wake_flags
);
4325 ttwu_stat(p
, task_cpu(p
), wake_flags
);
4330 static bool __task_needs_rq_lock(struct task_struct
*p
)
4332 unsigned int state
= READ_ONCE(p
->__state
);
4335 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4336 * the task is blocked. Make sure to check @state since ttwu() can drop
4337 * locks at the end, see ttwu_queue_wakelist().
4339 if (state
== TASK_RUNNING
|| state
== TASK_WAKING
)
4343 * Ensure we load p->on_rq after p->__state, otherwise it would be
4344 * possible to, falsely, observe p->on_rq == 0.
4346 * See try_to_wake_up() for a longer comment.
4354 * Ensure the task has finished __schedule() and will not be referenced
4355 * anymore. Again, see try_to_wake_up() for a longer comment.
4358 smp_cond_load_acquire(&p
->on_cpu
, !VAL
);
/**
 * task_call_func - Invoke a function on task in fixed state
 * @p: Process for which the function is to be invoked, can be @current.
 * @func: Function to invoke.
 * @arg: Argument to function.
 *
 * Fix the task in its current state by avoiding wakeups and/or rq operations
 * and call @func(@arg) on it. This function can use task_is_runnable() and
 * task_curr() to work out what the state is, if required. Given that @func
 * can be invoked with a runqueue lock held, it had better be quite
 * lightweight.
 *
 * Returns:
 *   Whatever @func returns
 */
int task_call_func(struct task_struct *p, task_call_f func, void *arg)
{
	struct rq *rq = NULL;
	struct rq_flags rf;
	int ret;

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);

	if (__task_needs_rq_lock(p))
		rq = __task_rq_lock(p, &rf);

	/*
	 * At this point the task is pinned; either:
	 *  - blocked and we're holding off wakeups	 (pi->lock)
	 *  - woken, and we're holding off enqueue	 (rq->lock)
	 *  - queued, and we're holding off schedule	 (rq->lock)
	 *  - running, and we're holding off de-schedule (rq->lock)
	 *
	 * The called function (@func) can use: task_curr(), p->on_rq and
	 * p->__state to differentiate between these states.
	 */
	ret = func(p, arg);

	if (rq)
		rq_unlock(rq, &rf);

	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
	return ret;
}
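
/*
 * Illustrative callback (a sketch, not code from this file): a hypothetical
 * probe that samples a task's state while it is pinned by task_call_func():
 *
 *	static int probe_state(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return task_curr(p);
 *	}
 *
 *	unsigned int state;
 *	int was_running = task_call_func(p, probe_state, &state);
 *
 * The callback may run under rq->lock, so it must stay lightweight and must
 * not sleep.
 */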
4410 * cpu_curr_snapshot - Return a snapshot of the currently running task
4411 * @cpu: The CPU on which to snapshot the task.
4413 * Returns the task_struct pointer of the task "currently" running on
4414 * the specified CPU.
4416 * If the specified CPU was offline, the return value is whatever it
4417 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4418 * task, but there is no guarantee. Callers wishing a useful return
4419 * value must take some action to ensure that the specified CPU remains
4420 * online throughout.
4422 * This function executes full memory barriers before and after fetching
4423 * the pointer, which permits the caller to confine this function's fetch
4424 * with respect to the caller's accesses to other shared variables.
4426 struct task_struct
*cpu_curr_snapshot(int cpu
)
4428 struct rq
*rq
= cpu_rq(cpu
);
4429 struct task_struct
*t
;
4432 rq_lock_irqsave(rq
, &rf
);
4433 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4434 t
= rcu_dereference(cpu_curr(cpu
));
4435 rq_unlock_irqrestore(rq
, &rf
);
4436 smp_mb(); /* Pairing determined by caller's synchronization design. */
/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * This function executes a full memory barrier before accessing the task state.
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}
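
/*
 * Illustrative sleeper/waker pairing (a sketch, not code from this file),
 * matching the wait-loop discussion above ttwu_runnable():
 *
 *	(sleeper)					(waker)
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)				CONDITION = 1;
 *			break;				wake_up_process(p);
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() orders the sleeper's CONDITION check against the
 * waker's CONDITION store, and try_to_wake_up() issues the matching full
 * barrier before it looks at the task state.
 */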
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup which is also used by sched_init() to
 * initialize the boot CPU's idle task.
 */
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.nr_migrations		= 0;
	INIT_LIST_HEAD(&p->se.group_node);

	/* A delayed task cannot be in clone(). */
	SCHED_WARN_ON(p->se.sched_delayed);

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq			= NULL;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* Even if schedstat is disabled, there should not be garbage */
	memset(&p->stats, 0, sizeof(p->stats));
#endif

	init_dl_entity(&p->dl);

	INIT_LIST_HEAD(&p->rt.run_list);
	p->rt.time_slice	= sched_rr_timeslice;

#ifdef CONFIG_SCHED_CLASS_EXT
	init_scx_entity(&p->scx);
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_COMPACTION
	p->capture_control = NULL;
#endif
	init_numa_balancing(clone_flags, p);
	p->wake_entry.u_flags = CSD_TYPE_TTWU;
	p->migration_pending = NULL;
	init_sched_mm_cid(p);
}
DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);

#ifdef CONFIG_NUMA_BALANCING

int sysctl_numa_balancing_mode;

static void __set_numabalancing_state(bool enabled)
{
	if (enabled)
		static_branch_enable(&sched_numa_balancing);
	else
		static_branch_disable(&sched_numa_balancing);
}

void set_numabalancing_state(bool enabled)
{
	if (enabled)
		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
	else
		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
	__set_numabalancing_state(enabled);
}
4545 #ifdef CONFIG_PROC_SYSCTL
4546 static void reset_memory_tiering(void)
4548 struct pglist_data
*pgdat
;
4550 for_each_online_pgdat(pgdat
) {
4551 pgdat
->nbp_threshold
= 0;
4552 pgdat
->nbp_th_nr_cand
= node_page_state(pgdat
, PGPROMOTE_CANDIDATE
);
4553 pgdat
->nbp_th_start
= jiffies_to_msecs(jiffies
);
4557 static int sysctl_numa_balancing(const struct ctl_table
*table
, int write
,
4558 void *buffer
, size_t *lenp
, loff_t
*ppos
)
4562 int state
= sysctl_numa_balancing_mode
;
4564 if (write
&& !capable(CAP_SYS_ADMIN
))
4569 err
= proc_dointvec_minmax(&t
, write
, buffer
, lenp
, ppos
);
4573 if (!(sysctl_numa_balancing_mode
& NUMA_BALANCING_MEMORY_TIERING
) &&
4574 (state
& NUMA_BALANCING_MEMORY_TIERING
))
4575 reset_memory_tiering();
4576 sysctl_numa_balancing_mode
= state
;
4577 __set_numabalancing_state(state
);
4584 #ifdef CONFIG_SCHEDSTATS
4586 DEFINE_STATIC_KEY_FALSE(sched_schedstats
);
4588 static void set_schedstats(bool enabled
)
4591 static_branch_enable(&sched_schedstats
);
4593 static_branch_disable(&sched_schedstats
);
4596 void force_schedstat_enabled(void)
4598 if (!schedstat_enabled()) {
4599 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4600 static_branch_enable(&sched_schedstats
);
4604 static int __init
setup_schedstats(char *str
)
4610 if (!strcmp(str
, "enable")) {
4611 set_schedstats(true);
4613 } else if (!strcmp(str
, "disable")) {
4614 set_schedstats(false);
4619 pr_warn("Unable to parse schedstats=\n");
4623 __setup("schedstats=", setup_schedstats
);
4625 #ifdef CONFIG_PROC_SYSCTL
4626 static int sysctl_schedstats(const struct ctl_table
*table
, int write
, void *buffer
,
4627 size_t *lenp
, loff_t
*ppos
)
4631 int state
= static_branch_likely(&sched_schedstats
);
4633 if (write
&& !capable(CAP_SYS_ADMIN
))
4638 err
= proc_dointvec_minmax(&t
, write
, buffer
, lenp
, ppos
);
4642 set_schedstats(state
);
4645 #endif /* CONFIG_PROC_SYSCTL */
4646 #endif /* CONFIG_SCHEDSTATS */
4648 #ifdef CONFIG_SYSCTL
4649 static struct ctl_table sched_core_sysctls
[] = {
4650 #ifdef CONFIG_SCHEDSTATS
4652 .procname
= "sched_schedstats",
4654 .maxlen
= sizeof(unsigned int),
4656 .proc_handler
= sysctl_schedstats
,
4657 .extra1
= SYSCTL_ZERO
,
4658 .extra2
= SYSCTL_ONE
,
4660 #endif /* CONFIG_SCHEDSTATS */
4661 #ifdef CONFIG_UCLAMP_TASK
4663 .procname
= "sched_util_clamp_min",
4664 .data
= &sysctl_sched_uclamp_util_min
,
4665 .maxlen
= sizeof(unsigned int),
4667 .proc_handler
= sysctl_sched_uclamp_handler
,
4670 .procname
= "sched_util_clamp_max",
4671 .data
= &sysctl_sched_uclamp_util_max
,
4672 .maxlen
= sizeof(unsigned int),
4674 .proc_handler
= sysctl_sched_uclamp_handler
,
4677 .procname
= "sched_util_clamp_min_rt_default",
4678 .data
= &sysctl_sched_uclamp_util_min_rt_default
,
4679 .maxlen
= sizeof(unsigned int),
4681 .proc_handler
= sysctl_sched_uclamp_handler
,
4683 #endif /* CONFIG_UCLAMP_TASK */
4684 #ifdef CONFIG_NUMA_BALANCING
4686 .procname
= "numa_balancing",
4687 .data
= NULL
, /* filled in by handler */
4688 .maxlen
= sizeof(unsigned int),
4690 .proc_handler
= sysctl_numa_balancing
,
4691 .extra1
= SYSCTL_ZERO
,
4692 .extra2
= SYSCTL_FOUR
,
4694 #endif /* CONFIG_NUMA_BALANCING */
4696 static int __init
sched_core_sysctl_init(void)
4698 register_sysctl_init("kernel", sched_core_sysctls
);
4701 late_initcall(sched_core_sysctl_init
);
4702 #endif /* CONFIG_SYSCTL */
4705 * fork()/clone()-time setup:
4707 int sched_fork(unsigned long clone_flags
, struct task_struct
*p
)
4709 __sched_fork(clone_flags
, p
);
4711 * We mark the process as NEW here. This guarantees that
4712 * nobody will actually run it, and a signal or other external
4713 * event cannot wake it up and insert it on the runqueue either.
4715 p
->__state
= TASK_NEW
;
4718 * Make sure we do not leak PI boosting priority to the child.
4720 p
->prio
= current
->normal_prio
;
4725 * Revert to default priority/policy on fork if requested.
4727 if (unlikely(p
->sched_reset_on_fork
)) {
4728 if (task_has_dl_policy(p
) || task_has_rt_policy(p
)) {
4729 p
->policy
= SCHED_NORMAL
;
4730 p
->static_prio
= NICE_TO_PRIO(0);
4732 } else if (PRIO_TO_NICE(p
->static_prio
) < 0)
4733 p
->static_prio
= NICE_TO_PRIO(0);
4735 p
->prio
= p
->normal_prio
= p
->static_prio
;
4736 set_load_weight(p
, false);
4737 p
->se
.custom_slice
= 0;
4738 p
->se
.slice
= sysctl_sched_base_slice
;
4741 * We don't need the reset flag anymore after the fork. It has
4742 * fulfilled its duty:
4744 p
->sched_reset_on_fork
= 0;
4747 if (dl_prio(p
->prio
))
4752 if (rt_prio(p
->prio
)) {
4753 p
->sched_class
= &rt_sched_class
;
4754 #ifdef CONFIG_SCHED_CLASS_EXT
4755 } else if (task_should_scx(p
->policy
)) {
4756 p
->sched_class
= &ext_sched_class
;
4759 p
->sched_class
= &fair_sched_class
;
4762 init_entity_runnable_average(&p
->se
);
#ifdef CONFIG_SCHED_INFO
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
	init_task_preempt_count(p);
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
	return 0;
}
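
/*
 * Illustrative userspace sketch (an assumption, not from the original file):
 * the sched_reset_on_fork handling above is what services the
 * SCHED_RESET_ON_FORK flag, which lets a real-time parent ensure its
 * children start with the default policy. Guarded out because it is not
 * kernel code; availability of the flag in <sched.h> is an assumption.
 */
#if 0	/* userspace example only */
#include <sched.h>

static int example_fifo_parent_forks_normal_children(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* Children created after this call revert to SCHED_NORMAL, nice 0. */
	return sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
}
#endif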
4780 int sched_cgroup_fork(struct task_struct
*p
, struct kernel_clone_args
*kargs
)
4782 unsigned long flags
;
4785 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4786 * required yet, but lockdep gets upset if rules are violated.
4788 raw_spin_lock_irqsave(&p
->pi_lock
, flags
);
4789 #ifdef CONFIG_CGROUP_SCHED
4791 struct task_group
*tg
;
4792 tg
= container_of(kargs
->cset
->subsys
[cpu_cgrp_id
],
4793 struct task_group
, css
);
4794 tg
= autogroup_task_group(p
, tg
);
4795 p
->sched_task_group
= tg
;
4800 * We're setting the CPU for the first time, we don't migrate,
4801 * so use __set_task_cpu().
4803 __set_task_cpu(p
, smp_processor_id());
4804 if (p
->sched_class
->task_fork
)
4805 p
->sched_class
->task_fork(p
);
4806 raw_spin_unlock_irqrestore(&p
->pi_lock
, flags
);
4811 void sched_cancel_fork(struct task_struct
*p
)
4816 void sched_post_fork(struct task_struct
*p
)
4818 uclamp_post_fork(p
);
unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return BW_UNIT;

	/*
	 * Doing this here saves a lot of checks in all
	 * the calling paths, and returning zero seems
	 * safe for them anyway.
	 */
	if (period == 0)
		return 0;

	return div64_u64(runtime << BW_SHIFT, period);
}
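
/*
 * Worked example (illustrative; assumes BW_SHIFT == 20, i.e.
 * BW_UNIT == 1 << 20, as defined in the scheduler headers): a 10 ms runtime
 * in a 100 ms period gives
 *
 *	to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC)
 *		= (10000000 << 20) / 100000000
 *		= 104857	(about 10% of BW_UNIT)
 *
 * so bandwidth admission checks can compare these fixed-point ratios
 * without doing a division on every check.
 */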
4839 * wake_up_new_task - wake up a newly created task for the first time.
4841 * This function will do some initial scheduler statistics housekeeping
4842 * that must be done for every newly created context, then puts the task
4843 * on the runqueue and wakes it.
4845 void wake_up_new_task(struct task_struct
*p
)
4849 int wake_flags
= WF_FORK
;
4851 raw_spin_lock_irqsave(&p
->pi_lock
, rf
.flags
);
4852 WRITE_ONCE(p
->__state
, TASK_RUNNING
);
4855 * Fork balancing, do it here and not earlier because:
4856 * - cpus_ptr can change in the fork path
4857 * - any previously selected CPU might disappear through hotplug
4859 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4860 * as we're not fully set-up yet.
4862 p
->recent_used_cpu
= task_cpu(p
);
4864 __set_task_cpu(p
, select_task_rq(p
, task_cpu(p
), &wake_flags
));
4866 rq
= __task_rq_lock(p
, &rf
);
4867 update_rq_clock(rq
);
4868 post_init_entity_util_avg(p
);
4870 activate_task(rq
, p
, ENQUEUE_NOCLOCK
| ENQUEUE_INITIAL
);
4871 trace_sched_wakeup_new(p
);
4872 wakeup_preempt(rq
, p
, wake_flags
);
4874 if (p
->sched_class
->task_woken
) {
4876 * Nothing relies on rq->lock after this, so it's fine to
4879 rq_unpin_lock(rq
, &rf
);
4880 p
->sched_class
->task_woken(rq
, p
);
4881 rq_repin_lock(rq
, &rf
);
4884 task_rq_unlock(rq
, p
, &rf
);
4887 #ifdef CONFIG_PREEMPT_NOTIFIERS
4889 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key
);
4891 void preempt_notifier_inc(void)
4893 static_branch_inc(&preempt_notifier_key
);
4895 EXPORT_SYMBOL_GPL(preempt_notifier_inc
);
4897 void preempt_notifier_dec(void)
4899 static_branch_dec(&preempt_notifier_key
);
4901 EXPORT_SYMBOL_GPL(preempt_notifier_dec
);
/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	if (!static_branch_unlikely(&preempt_notifier_key))
		WARN(1, "registering preempt_notifier while notifiers disabled\n");

	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is *not* safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
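
/*
 * Illustrative sketch (assumed usage, not from the original file): a client
 * of this API supplies sched_in/sched_out callbacks and bumps the static
 * key via preempt_notifier_inc() before registering for the current task.
 * All example_* names are hypothetical.
 */
static void example_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current has just been scheduled back in on @cpu */
}

static void example_sched_out(struct preempt_notifier *pn,
			      struct task_struct *next)
{
	/* current is being scheduled out in favour of @next */
}

static struct preempt_ops example_preempt_ops = {
	.sched_in	= example_sched_in,
	.sched_out	= example_sched_out,
};

static void example_register_for_current(struct preempt_notifier *pn)
{
	preempt_notifier_inc();
	preempt_notifier_init(pn, &example_preempt_ops);
	preempt_notifier_register(pn);
}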
4928 static void __fire_sched_in_preempt_notifiers(struct task_struct
*curr
)
4930 struct preempt_notifier
*notifier
;
4932 hlist_for_each_entry(notifier
, &curr
->preempt_notifiers
, link
)
4933 notifier
->ops
->sched_in(notifier
, raw_smp_processor_id());
4936 static __always_inline
void fire_sched_in_preempt_notifiers(struct task_struct
*curr
)
4938 if (static_branch_unlikely(&preempt_notifier_key
))
4939 __fire_sched_in_preempt_notifiers(curr
);
4943 __fire_sched_out_preempt_notifiers(struct task_struct
*curr
,
4944 struct task_struct
*next
)
4946 struct preempt_notifier
*notifier
;
4948 hlist_for_each_entry(notifier
, &curr
->preempt_notifiers
, link
)
4949 notifier
->ops
->sched_out(notifier
, next
);
4952 static __always_inline
void
4953 fire_sched_out_preempt_notifiers(struct task_struct
*curr
,
4954 struct task_struct
*next
)
4956 if (static_branch_unlikely(&preempt_notifier_key
))
4957 __fire_sched_out_preempt_notifiers(curr
, next
);
4960 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4962 static inline void fire_sched_in_preempt_notifiers(struct task_struct
*curr
)
4967 fire_sched_out_preempt_notifiers(struct task_struct
*curr
,
4968 struct task_struct
*next
)
4972 #endif /* CONFIG_PREEMPT_NOTIFIERS */
static inline void prepare_task(struct task_struct *next)
{
	/*
	 * Claim the task as running, we do this before switching to it
	 * such that any running task will have this set.
	 *
	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
	 * its ordering comment.
	 */
	WRITE_ONCE(next->on_cpu, 1);
}

static inline void finish_task(struct task_struct *prev)
{
	/*
	 * This must be the very last reference to @prev from this CPU. After
	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
	 * must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
}
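
/*
 * Informational pairing sketch (not from the original file): the release
 * store above is what the wakeup path spins on, roughly:
 *
 *	finish_task()				try_to_wake_up()
 *	  smp_store_release(&prev->on_cpu, 0)	  smp_cond_load_acquire(&p->on_cpu, !VAL)
 *						  ... now safe to select a new CPU for @p
 *
 * so a task can never be enqueued elsewhere while its old CPU might still
 * be using its stack.
 */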
5008 static void do_balance_callbacks(struct rq
*rq
, struct balance_callback
*head
)
5010 void (*func
)(struct rq
*rq
);
5011 struct balance_callback
*next
;
5013 lockdep_assert_rq_held(rq
);
5016 func
= (void (*)(struct rq
*))head
->func
;
5025 static void balance_push(struct rq
*rq
);
5028 * balance_push_callback is a right abuse of the callback interface and plays
5029 * by significantly different rules.
 * Where the normal balance_callback's purpose is to be run in the same context
5032 * that queued it (only later, when it's safe to drop rq->lock again),
5033 * balance_push_callback is specifically targeted at __schedule().
5035 * This abuse is tolerated because it places all the unlikely/odd cases behind
5036 * a single test, namely: rq->balance_callback == NULL.
5038 struct balance_callback balance_push_callback
= {
5040 .func
= balance_push
,
5043 static inline struct balance_callback
*
5044 __splice_balance_callbacks(struct rq
*rq
, bool split
)
5046 struct balance_callback
*head
= rq
->balance_callback
;
5051 lockdep_assert_rq_held(rq
);
5053 * Must not take balance_push_callback off the list when
5054 * splice_balance_callbacks() and balance_callbacks() are not
5055 * in the same rq->lock section.
5057 * In that case it would be possible for __schedule() to interleave
5058 * and observe the list empty.
5060 if (split
&& head
== &balance_push_callback
)
5063 rq
->balance_callback
= NULL
;
5068 struct balance_callback
*splice_balance_callbacks(struct rq
*rq
)
5070 return __splice_balance_callbacks(rq
, true);
5073 static void __balance_callbacks(struct rq
*rq
)
5075 do_balance_callbacks(rq
, __splice_balance_callbacks(rq
, false));
5078 void balance_callbacks(struct rq
*rq
, struct balance_callback
*head
)
5080 unsigned long flags
;
5082 if (unlikely(head
)) {
5083 raw_spin_rq_lock_irqsave(rq
, flags
);
5084 do_balance_callbacks(rq
, head
);
5085 raw_spin_rq_unlock_irqrestore(rq
, flags
);
5091 static inline void __balance_callbacks(struct rq
*rq
)
5098 prepare_lock_switch(struct rq
*rq
, struct task_struct
*next
, struct rq_flags
*rf
)
5101 * Since the runqueue lock will be released by the next
5102 * task (which is an invalid locking op but in the case
5103 * of the scheduler it's an obvious special-case), so we
5104 * do an early lockdep release here:
5106 rq_unpin_lock(rq
, rf
);
5107 spin_release(&__rq_lockp(rq
)->dep_map
, _THIS_IP_
);
5108 #ifdef CONFIG_DEBUG_SPINLOCK
5109 /* this is a valid case when another task releases the spinlock */
5110 rq_lockp(rq
)->owner
= next
;
5114 static inline void finish_lock_switch(struct rq
*rq
)
5117 * If we are tracking spinlock dependencies then we have to
5118 * fix up the runqueue lock - which gets 'carried over' from
5119 * prev into current:
5121 spin_acquire(&__rq_lockp(rq
)->dep_map
, 0, 0, _THIS_IP_
);
5122 __balance_callbacks(rq
);
5123 raw_spin_rq_unlock_irq(rq
);
5127 * NOP if the arch has not defined these:
5130 #ifndef prepare_arch_switch
5131 # define prepare_arch_switch(next) do { } while (0)
5134 #ifndef finish_arch_post_lock_switch
5135 # define finish_arch_post_lock_switch() do { } while (0)
5138 static inline void kmap_local_sched_out(void)
5140 #ifdef CONFIG_KMAP_LOCAL
5141 if (unlikely(current
->kmap_ctrl
.idx
))
5142 __kmap_local_sched_out();
5146 static inline void kmap_local_sched_in(void)
5148 #ifdef CONFIG_KMAP_LOCAL
5149 if (unlikely(current
->kmap_ctrl
.idx
))
5150 __kmap_local_sched_in();
5155 * prepare_task_switch - prepare to switch tasks
5156 * @rq: the runqueue preparing to switch
5157 * @prev: the current task that is being switched out
5158 * @next: the task we are going to switch to.
5160 * This is called with the rq lock held and interrupts off. It must
5161 * be paired with a subsequent finish_task_switch after the context
5164 * prepare_task_switch sets up locking and calls architecture specific
5168 prepare_task_switch(struct rq
*rq
, struct task_struct
*prev
,
5169 struct task_struct
*next
)
5171 kcov_prepare_switch(prev
);
5172 sched_info_switch(rq
, prev
, next
);
5173 perf_event_task_sched_out(prev
, next
);
5175 fire_sched_out_preempt_notifiers(prev
, next
);
5176 kmap_local_sched_out();
5178 prepare_arch_switch(next
);
5182 * finish_task_switch - clean up after a task-switch
5183 * @prev: the thread we just switched away from.
5185 * finish_task_switch must be called after the context switch, paired
5186 * with a prepare_task_switch call before the context switch.
5187 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5188 * and do any other architecture-specific cleanup actions.
5190 * Note that we may have delayed dropping an mm in context_switch(). If
5191 * so, we finish that here outside of the runqueue lock. (Doing it
5192 * with the lock held can cause deadlocks; see schedule() for
 * The context switch has flipped the stack from under us and restored the
5196 * local variables which were saved when this task called schedule() in the
5197 * past. 'prev == current' is still correct but we need to recalculate this_rq
5198 * because prev may have moved to another CPU.
5200 static struct rq
*finish_task_switch(struct task_struct
*prev
)
5201 __releases(rq
->lock
)
5203 struct rq
*rq
= this_rq();
5204 struct mm_struct
*mm
= rq
->prev_mm
;
5205 unsigned int prev_state
;
5208 * The previous task will have left us with a preempt_count of 2
5209 * because it left us after:
5212 * preempt_disable(); // 1
5214 * raw_spin_lock_irq(&rq->lock) // 2
5216 * Also, see FORK_PREEMPT_COUNT.
5218 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET
,
5219 "corrupted preempt_count: %s/%d/0x%x\n",
5220 current
->comm
, current
->pid
, preempt_count()))
5221 preempt_count_set(FORK_PREEMPT_COUNT
);
5226 * A task struct has one reference for the use as "current".
5227 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5228 * schedule one last time. The schedule call will never return, and
5229 * the scheduled task must drop that reference.
5231 * We must observe prev->state before clearing prev->on_cpu (in
5232 * finish_task), otherwise a concurrent wakeup can get prev
 * running on another CPU and we could race with its RUNNING -> DEAD
5234 * transition, resulting in a double drop.
5236 prev_state
= READ_ONCE(prev
->__state
);
5237 vtime_task_switch(prev
);
5238 perf_event_task_sched_in(prev
, current
);
5240 tick_nohz_task_switch();
5241 finish_lock_switch(rq
);
5242 finish_arch_post_lock_switch();
5243 kcov_finish_switch(current
);
5245 * kmap_local_sched_out() is invoked with rq::lock held and
5246 * interrupts disabled. There is no requirement for that, but the
5247 * sched out code does not have an interrupt enabled section.
5248 * Restoring the maps on sched in does not require interrupts being
5251 kmap_local_sched_in();
5253 fire_sched_in_preempt_notifiers(current
);
5255 * When switching through a kernel thread, the loop in
5256 * membarrier_{private,global}_expedited() may have observed that
5257 * kernel thread and not issued an IPI. It is therefore possible to
5258 * schedule between user->kernel->user threads without passing though
5259 * switch_mm(). Membarrier requires a barrier after storing to
5260 * rq->curr, before returning to userspace, so provide them here:
5262 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5263 * provided by mmdrop_lazy_tlb(),
5264 * - a sync_core for SYNC_CORE.
5267 membarrier_mm_sync_core_before_usermode(mm
);
5268 mmdrop_lazy_tlb_sched(mm
);
5271 if (unlikely(prev_state
== TASK_DEAD
)) {
5272 if (prev
->sched_class
->task_dead
)
5273 prev
->sched_class
->task_dead(prev
);
5275 /* Task is done with its stack. */
5276 put_task_stack(prev
);
5278 put_task_struct_rcu_user(prev
);
5285 * schedule_tail - first thing a freshly forked thread must call.
5286 * @prev: the thread we just switched away from.
5288 asmlinkage __visible
void schedule_tail(struct task_struct
*prev
)
5289 __releases(rq
->lock
)
5292 * New tasks start with FORK_PREEMPT_COUNT, see there and
5293 * finish_task_switch() for details.
5295 * finish_task_switch() will drop rq->lock() and lower preempt_count
5296 * and the preempt_enable() will end up enabling preemption (on
5297 * PREEMPT_COUNT kernels).
5300 finish_task_switch(prev
);
5303 if (current
->set_child_tid
)
5304 put_user(task_pid_vnr(current
), current
->set_child_tid
);
5306 calculate_sigpending();
5310 * context_switch - switch to the new MM and the new thread's register state.
5312 static __always_inline
struct rq
*
5313 context_switch(struct rq
*rq
, struct task_struct
*prev
,
5314 struct task_struct
*next
, struct rq_flags
*rf
)
5316 prepare_task_switch(rq
, prev
, next
);
5319 * For paravirt, this is coupled with an exit in switch_to to
5320 * combine the page table reload and the switch backend into
5323 arch_start_context_switch(prev
);
5326 * kernel -> kernel lazy + transfer active
5327 * user -> kernel lazy + mmgrab_lazy_tlb() active
5329 * kernel -> user switch + mmdrop_lazy_tlb() active
5330 * user -> user switch
5332 * switch_mm_cid() needs to be updated if the barriers provided
5333 * by context_switch() are modified.
5335 if (!next
->mm
) { // to kernel
5336 enter_lazy_tlb(prev
->active_mm
, next
);
5338 next
->active_mm
= prev
->active_mm
;
5339 if (prev
->mm
) // from user
5340 mmgrab_lazy_tlb(prev
->active_mm
);
5342 prev
->active_mm
= NULL
;
5344 membarrier_switch_mm(rq
, prev
->active_mm
, next
->mm
);
5346 * sys_membarrier() requires an smp_mb() between setting
5347 * rq->curr / membarrier_switch_mm() and returning to userspace.
5349 * The below provides this either through switch_mm(), or in
5350 * case 'prev->active_mm == next->mm' through
5351 * finish_task_switch()'s mmdrop().
5353 switch_mm_irqs_off(prev
->active_mm
, next
->mm
, next
);
5354 lru_gen_use_mm(next
->mm
);
5356 if (!prev
->mm
) { // from kernel
5357 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5358 rq
->prev_mm
= prev
->active_mm
;
5359 prev
->active_mm
= NULL
;
5363 /* switch_mm_cid() requires the memory barriers above. */
5364 switch_mm_cid(rq
, prev
, next
);
5366 prepare_lock_switch(rq
, next
, rf
);
5368 /* Here we just switch the register state and the stack. */
5369 switch_to(prev
, next
, prev
);
5372 return finish_task_switch(prev
);
5376 * nr_running and nr_context_switches:
5378 * externally visible scheduler statistics: current number of runnable
5379 * threads, total number of context switches performed since bootup.
5381 unsigned int nr_running(void)
5383 unsigned int i
, sum
= 0;
5385 for_each_online_cpu(i
)
5386 sum
+= cpu_rq(i
)->nr_running
;
/*
 * Check if only the current task is running on the CPU.
 *
 * Caution: this function does not check that the caller has disabled
 * preemption, thus the result might have a time-of-check-to-time-of-use
 * race.  The caller is responsible to use it correctly, for example:
 *
 * - from a non-preemptible section (of course)
 *
 * - from a thread that is bound to a single CPU
 *
 * - in a loop with very short iterations (e.g. a polling loop)
 */
bool single_task_running(void)
{
	return raw_rq()->nr_running == 1;
}
EXPORT_SYMBOL(single_task_running);
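
/*
 * Illustrative sketch (assumed usage, not from the original file): the
 * short-iteration polling loop mentioned above, yielding as soon as this
 * CPU has other runnable work. The helper names are hypothetical.
 */
static void example_poll_until(bool (*done)(void *), void *arg)
{
	while (!done(arg)) {
		if (!single_task_running())
			cond_resched();	/* someone else wants this CPU */
		cpu_relax();
	}
}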
5410 unsigned long long nr_context_switches_cpu(int cpu
)
5412 return cpu_rq(cpu
)->nr_switches
;
5415 unsigned long long nr_context_switches(void)
5418 unsigned long long sum
= 0;
5420 for_each_possible_cpu(i
)
5421 sum
+= cpu_rq(i
)->nr_switches
;
5427 * Consumers of these two interfaces, like for example the cpuidle menu
5428 * governor, are using nonsensical data. Preferring shallow idle state selection
5429 * for a CPU that has IO-wait which might not even end up running the task when
5430 * it does become runnable.
5433 unsigned int nr_iowait_cpu(int cpu
)
5435 return atomic_read(&cpu_rq(cpu
)->nr_iowait
);
5439 * IO-wait accounting, and how it's mostly bollocks (on SMP).
 * The idea behind IO-wait accounting is to account the idle time that we could
 * have spent running if it were not for IO. That is, if we were to improve the
5443 * storage performance, we'd have a proportional reduction in IO-wait time.
5445 * This all works nicely on UP, where, when a task blocks on IO, we account
5446 * idle time as IO-wait, because if the storage were faster, it could've been
5447 * running and we'd not be idle.
5449 * This has been extended to SMP, by doing the same for each CPU. This however
5452 * Imagine for instance the case where two tasks block on one CPU, only the one
5453 * CPU will have IO-wait accounted, while the other has regular idle. Even
 * though, if the storage were faster, both could've run at the same time,
5455 * utilising both CPUs.
 * This means that, when looking globally, the current IO-wait accounting on
5458 * SMP is a lower bound, by reason of under accounting.
5460 * Worse, since the numbers are provided per CPU, they are sometimes
5461 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5462 * associated with any one particular CPU, it can wake to another CPU than it
5463 * blocked on. This means the per CPU IO-wait number is meaningless.
5465 * Task CPU affinities can make all that even more 'interesting'.
5468 unsigned int nr_iowait(void)
5470 unsigned int i
, sum
= 0;
5472 for_each_possible_cpu(i
)
5473 sum
+= nr_iowait_cpu(i
);
5481 * sched_exec - execve() is a valuable balancing opportunity, because at
5482 * this point the task has the smallest effective memory and cache footprint.
5484 void sched_exec(void)
5486 struct task_struct
*p
= current
;
5487 struct migration_arg arg
;
5490 scoped_guard (raw_spinlock_irqsave
, &p
->pi_lock
) {
5491 dest_cpu
= p
->sched_class
->select_task_rq(p
, task_cpu(p
), WF_EXEC
);
5492 if (dest_cpu
== smp_processor_id())
5495 if (unlikely(!cpu_active(dest_cpu
)))
5498 arg
= (struct migration_arg
){ p
, dest_cpu
};
5500 stop_one_cpu(task_cpu(p
), migration_cpu_stop
, &arg
);
5505 DEFINE_PER_CPU(struct kernel_stat
, kstat
);
5506 DEFINE_PER_CPU(struct kernel_cpustat
, kernel_cpustat
);
5508 EXPORT_PER_CPU_SYMBOL(kstat
);
5509 EXPORT_PER_CPU_SYMBOL(kernel_cpustat
);
5512 * The function fair_sched_class.update_curr accesses the struct curr
5513 * and its field curr->exec_start; when called from task_sched_runtime(),
5514 * we observe a high rate of cache misses in practice.
5515 * Prefetching this data results in improved performance.
5517 static inline void prefetch_curr_exec_start(struct task_struct
*p
)
5519 #ifdef CONFIG_FAIR_GROUP_SCHED
5520 struct sched_entity
*curr
= p
->se
.cfs_rq
->curr
;
5522 struct sched_entity
*curr
= task_rq(p
)->cfs
.curr
;
5525 prefetch(&curr
->exec_start
);
/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	u64 ns;

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	/*
	 * 64-bit doesn't need locks to atomically read a 64-bit value.
	 * So we have an optimization opportunity when the task's delta_exec is 0.
	 * Reading ->on_cpu is racy, but this is OK.
	 *
	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
	 * If we race with it entering CPU, unaccounted time is 0. This is
	 * indistinguishable from the read occurring a few cycles earlier.
	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
	 * been accounted, so we're correct here as well.
	 */
	if (!p->on_cpu || !task_on_rq_queued(p))
		return p->se.sum_exec_runtime;
#endif

	rq = task_rq_lock(p, &rf);
	/*
	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
	 * project cycles that may never be accounted to this
	 * thread, breaking clock_gettime().
	 */
	if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
		prefetch_curr_exec_start(p);
		update_rq_clock(rq);
		p->sched_class->update_curr(rq);
	}
	ns = p->se.sum_exec_runtime;
	task_rq_unlock(rq, &rf);

	return ns;
}
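
/*
 * Informational note (not from the original file): this value ultimately
 * backs the per-thread CPU-time clock, e.g. a userspace
 *
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * which is why the update_curr() call above matters: without folding in
 * the currently running delta, a busy thread's clock would appear to
 * stall between ticks.
 */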
5572 #ifdef CONFIG_SCHED_DEBUG
5573 static u64
cpu_resched_latency(struct rq
*rq
)
5575 int latency_warn_ms
= READ_ONCE(sysctl_resched_latency_warn_ms
);
5576 u64 resched_latency
, now
= rq_clock(rq
);
5577 static bool warned_once
;
5579 if (sysctl_resched_latency_warn_once
&& warned_once
)
5582 if (!need_resched() || !latency_warn_ms
)
5585 if (system_state
== SYSTEM_BOOTING
)
5588 if (!rq
->last_seen_need_resched_ns
) {
5589 rq
->last_seen_need_resched_ns
= now
;
5590 rq
->ticks_without_resched
= 0;
5594 rq
->ticks_without_resched
++;
5595 resched_latency
= now
- rq
->last_seen_need_resched_ns
;
5596 if (resched_latency
<= latency_warn_ms
* NSEC_PER_MSEC
)
5601 return resched_latency
;
5604 static int __init
setup_resched_latency_warn_ms(char *str
)
5608 if ((kstrtol(str
, 0, &val
))) {
5609 pr_warn("Unable to set resched_latency_warn_ms\n");
5613 sysctl_resched_latency_warn_ms
= val
;
5616 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms
);
5618 static inline u64
cpu_resched_latency(struct rq
*rq
) { return 0; }
5619 #endif /* CONFIG_SCHED_DEBUG */
5622 * This function gets called by the timer code, with HZ frequency.
5623 * We call it with interrupts disabled.
5625 void sched_tick(void)
5627 int cpu
= smp_processor_id();
5628 struct rq
*rq
= cpu_rq(cpu
);
5629 /* accounting goes to the donor task */
5630 struct task_struct
*donor
;
5632 unsigned long hw_pressure
;
5633 u64 resched_latency
;
5635 if (housekeeping_cpu(cpu
, HK_TYPE_TICK
))
5636 arch_scale_freq_tick();
5643 psi_account_irqtime(rq
, donor
, NULL
);
5645 update_rq_clock(rq
);
5646 hw_pressure
= arch_scale_hw_pressure(cpu_of(rq
));
5647 update_hw_load_avg(rq_clock_task(rq
), rq
, hw_pressure
);
5649 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY
))
5652 donor
->sched_class
->task_tick(rq
, donor
, 0);
5653 if (sched_feat(LATENCY_WARN
))
5654 resched_latency
= cpu_resched_latency(rq
);
5655 calc_global_load_tick(rq
);
5656 sched_core_tick(rq
);
5657 task_tick_mm_cid(rq
, donor
);
5662 if (sched_feat(LATENCY_WARN
) && resched_latency
)
5663 resched_latency_warn(cpu
, resched_latency
);
5665 perf_event_task_tick();
5667 if (donor
->flags
& PF_WQ_WORKER
)
5668 wq_worker_tick(donor
);
5671 if (!scx_switched_all()) {
5672 rq
->idle_balance
= idle_cpu(cpu
);
5673 sched_balance_trigger(rq
);
5678 #ifdef CONFIG_NO_HZ_FULL
5683 struct delayed_work work
;
5685 /* Values for ->state, see diagram below. */
5686 #define TICK_SCHED_REMOTE_OFFLINE 0
5687 #define TICK_SCHED_REMOTE_OFFLINING 1
5688 #define TICK_SCHED_REMOTE_RUNNING 2
5691 * State diagram for ->state:
5694 * TICK_SCHED_REMOTE_OFFLINE
5697 * | | sched_tick_remote()
5700 * +--TICK_SCHED_REMOTE_OFFLINING
5703 * sched_tick_start() | | sched_tick_stop()
5706 * TICK_SCHED_REMOTE_RUNNING
5709 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5710 * and sched_tick_start() are happy to leave the state in RUNNING.
5713 static struct tick_work __percpu
*tick_work_cpu
;
5715 static void sched_tick_remote(struct work_struct
*work
)
5717 struct delayed_work
*dwork
= to_delayed_work(work
);
5718 struct tick_work
*twork
= container_of(dwork
, struct tick_work
, work
);
5719 int cpu
= twork
->cpu
;
5720 struct rq
*rq
= cpu_rq(cpu
);
5724 * Handle the tick only if it appears the remote CPU is running in full
5725 * dynticks mode. The check is racy by nature, but missing a tick or
 * having one too many is no big deal because the scheduler tick updates
5727 * statistics and checks timeslices in a time-independent way, regardless
5728 * of when exactly it is running.
5730 if (tick_nohz_tick_stopped_cpu(cpu
)) {
5731 guard(rq_lock_irq
)(rq
);
5732 struct task_struct
*curr
= rq
->curr
;
5734 if (cpu_online(cpu
)) {
5736 * Since this is a remote tick for full dynticks mode,
5737 * we are always sure that there is no proxy (only a
5738 * single task is running).
5740 SCHED_WARN_ON(rq
->curr
!= rq
->donor
);
5741 update_rq_clock(rq
);
5743 if (!is_idle_task(curr
)) {
5745 * Make sure the next tick runs within a
5746 * reasonable amount of time.
5748 u64 delta
= rq_clock_task(rq
) - curr
->se
.exec_start
;
5749 WARN_ON_ONCE(delta
> (u64
)NSEC_PER_SEC
* 3);
5751 curr
->sched_class
->task_tick(rq
, curr
, 0);
5753 calc_load_nohz_remote(rq
);
5758 * Run the remote tick once per second (1Hz). This arbitrary
 * frequency is low enough to avoid overload but high enough
5760 * to keep scheduler internal stats reasonably up to date. But
5761 * first update state to reflect hotplug activity if required.
5763 os
= atomic_fetch_add_unless(&twork
->state
, -1, TICK_SCHED_REMOTE_RUNNING
);
5764 WARN_ON_ONCE(os
== TICK_SCHED_REMOTE_OFFLINE
);
5765 if (os
== TICK_SCHED_REMOTE_RUNNING
)
5766 queue_delayed_work(system_unbound_wq
, dwork
, HZ
);
5769 static void sched_tick_start(int cpu
)
5772 struct tick_work
*twork
;
5774 if (housekeeping_cpu(cpu
, HK_TYPE_TICK
))
5777 WARN_ON_ONCE(!tick_work_cpu
);
5779 twork
= per_cpu_ptr(tick_work_cpu
, cpu
);
5780 os
= atomic_xchg(&twork
->state
, TICK_SCHED_REMOTE_RUNNING
);
5781 WARN_ON_ONCE(os
== TICK_SCHED_REMOTE_RUNNING
);
5782 if (os
== TICK_SCHED_REMOTE_OFFLINE
) {
5784 INIT_DELAYED_WORK(&twork
->work
, sched_tick_remote
);
5785 queue_delayed_work(system_unbound_wq
, &twork
->work
, HZ
);
5789 #ifdef CONFIG_HOTPLUG_CPU
5790 static void sched_tick_stop(int cpu
)
5792 struct tick_work
*twork
;
5795 if (housekeeping_cpu(cpu
, HK_TYPE_TICK
))
5798 WARN_ON_ONCE(!tick_work_cpu
);
5800 twork
= per_cpu_ptr(tick_work_cpu
, cpu
);
5801 /* There cannot be competing actions, but don't rely on stop-machine. */
5802 os
= atomic_xchg(&twork
->state
, TICK_SCHED_REMOTE_OFFLINING
);
5803 WARN_ON_ONCE(os
!= TICK_SCHED_REMOTE_RUNNING
);
5804 /* Don't cancel, as this would mess up the state machine. */
5806 #endif /* CONFIG_HOTPLUG_CPU */
5808 int __init
sched_tick_offload_init(void)
5810 tick_work_cpu
= alloc_percpu(struct tick_work
);
5811 BUG_ON(!tick_work_cpu
);
5815 #else /* !CONFIG_NO_HZ_FULL */
5816 static inline void sched_tick_start(int cpu
) { }
5817 static inline void sched_tick_stop(int cpu
) { }
5820 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5821 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5823 * If the value passed in is equal to the current preempt count
5824 * then we just disabled preemption. Start timing the latency.
5826 static inline void preempt_latency_start(int val
)
5828 if (preempt_count() == val
) {
5829 unsigned long ip
= get_lock_parent_ip();
5830 #ifdef CONFIG_DEBUG_PREEMPT
5831 current
->preempt_disable_ip
= ip
;
5833 trace_preempt_off(CALLER_ADDR0
, ip
);
5837 void preempt_count_add(int val
)
5839 #ifdef CONFIG_DEBUG_PREEMPT
5843 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5846 __preempt_count_add(val
);
5847 #ifdef CONFIG_DEBUG_PREEMPT
5849 * Spinlock count overflowing soon?
5851 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK
) >=
5854 preempt_latency_start(val
);
5856 EXPORT_SYMBOL(preempt_count_add
);
5857 NOKPROBE_SYMBOL(preempt_count_add
);
 * If the value passed in is equal to the current preempt count
5861 * then we just enabled preemption. Stop timing the latency.
5863 static inline void preempt_latency_stop(int val
)
5865 if (preempt_count() == val
)
5866 trace_preempt_on(CALLER_ADDR0
, get_lock_parent_ip());
void preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	preempt_latency_stop(val);
	__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
NOKPROBE_SYMBOL(preempt_count_sub);
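
/*
 * Informational sketch (not from the original file): on kernels with
 * preemption debugging or preempt-off tracing, preempt_disable() and
 * preempt_enable() route through preempt_count_add()/preempt_count_sub()
 * above, so critical sections nest via the count:
 *
 *	preempt_disable();	// count 0 -> 1, latency timing starts
 *	  preempt_disable();	// count 1 -> 2
 *	  preempt_enable();	// count 2 -> 1
 *	preempt_enable();	// count 1 -> 0, latency timing stops and
 *				// a pending reschedule is honoured again
 */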
5892 static inline void preempt_latency_start(int val
) { }
5893 static inline void preempt_latency_stop(int val
) { }
5896 static inline unsigned long get_preempt_disable_ip(struct task_struct
*p
)
5898 #ifdef CONFIG_DEBUG_PREEMPT
5899 return p
->preempt_disable_ip
;
5906 * Print scheduling while atomic bug:
5908 static noinline
void __schedule_bug(struct task_struct
*prev
)
5910 /* Save this before calling printk(), since that will clobber it */
5911 unsigned long preempt_disable_ip
= get_preempt_disable_ip(current
);
5913 if (oops_in_progress
)
5916 printk(KERN_ERR
"BUG: scheduling while atomic: %s/%d/0x%08x\n",
5917 prev
->comm
, prev
->pid
, preempt_count());
5919 debug_show_held_locks(prev
);
5921 if (irqs_disabled())
5922 print_irqtrace_events(prev
);
5923 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT
)) {
5924 pr_err("Preemption disabled at:");
5925 print_ip_sym(KERN_ERR
, preempt_disable_ip
);
5927 check_panic_on_warn("scheduling while atomic");
5930 add_taint(TAINT_WARN
, LOCKDEP_STILL_OK
);
5934 * Various schedule()-time debugging checks and statistics:
5936 static inline void schedule_debug(struct task_struct
*prev
, bool preempt
)
5938 #ifdef CONFIG_SCHED_STACK_END_CHECK
5939 if (task_stack_end_corrupted(prev
))
5940 panic("corrupted stack end detected inside scheduler\n");
5942 if (task_scs_end_corrupted(prev
))
5943 panic("corrupted shadow stack detected inside scheduler\n");
5946 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5947 if (!preempt
&& READ_ONCE(prev
->__state
) && prev
->non_block_count
) {
5948 printk(KERN_ERR
"BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5949 prev
->comm
, prev
->pid
, prev
->non_block_count
);
5951 add_taint(TAINT_WARN
, LOCKDEP_STILL_OK
);
5955 if (unlikely(in_atomic_preempt_off())) {
5956 __schedule_bug(prev
);
5957 preempt_count_set(PREEMPT_DISABLED
);
5960 SCHED_WARN_ON(ct_state() == CT_STATE_USER
);
5962 profile_hit(SCHED_PROFILING
, __builtin_return_address(0));
5964 schedstat_inc(this_rq()->sched_count
);
5967 static void prev_balance(struct rq
*rq
, struct task_struct
*prev
,
5968 struct rq_flags
*rf
)
5970 const struct sched_class
*start_class
= prev
->sched_class
;
5971 const struct sched_class
*class;
5973 #ifdef CONFIG_SCHED_CLASS_EXT
5975 * SCX requires a balance() call before every pick_task() including when
5976 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
5977 * SCX instead. Also, set a flag to detect missing balance() call.
5979 if (scx_enabled()) {
5980 rq
->scx
.flags
|= SCX_RQ_BAL_PENDING
;
5981 if (sched_class_above(&ext_sched_class
, start_class
))
5982 start_class
= &ext_sched_class
;
5987 * We must do the balancing pass before put_prev_task(), such
5988 * that when we release the rq->lock the task is in the same
5989 * state as before we took rq->lock.
5991 * We can terminate the balance pass as soon as we know there is
5992 * a runnable task of @class priority or higher.
5994 for_active_class_range(class, start_class
, &idle_sched_class
) {
5995 if (class->balance
&& class->balance(rq
, prev
, rf
))
6001 * Pick up the highest-prio task:
6003 static inline struct task_struct
*
6004 __pick_next_task(struct rq
*rq
, struct task_struct
*prev
, struct rq_flags
*rf
)
6006 const struct sched_class
*class;
6007 struct task_struct
*p
;
6009 rq
->dl_server
= NULL
;
6015 * Optimization: we know that if all tasks are in the fair class we can
6016 * call that function directly, but only if the @prev task wasn't of a
6017 * higher scheduling class, because otherwise those lose the
6018 * opportunity to pull in more work from other CPUs.
6020 if (likely(!sched_class_above(prev
->sched_class
, &fair_sched_class
) &&
6021 rq
->nr_running
== rq
->cfs
.h_nr_running
)) {
6023 p
= pick_next_task_fair(rq
, prev
, rf
);
6024 if (unlikely(p
== RETRY_TASK
))
6027 /* Assume the next prioritized class is idle_sched_class */
6029 p
= pick_task_idle(rq
);
6030 put_prev_set_next_task(rq
, prev
, p
);
6037 prev_balance(rq
, prev
, rf
);
6039 for_each_active_class(class) {
6040 if (class->pick_next_task
) {
6041 p
= class->pick_next_task(rq
, prev
);
6045 p
= class->pick_task(rq
);
6047 put_prev_set_next_task(rq
, prev
, p
);
6053 BUG(); /* The idle class should always have a runnable task. */
6056 #ifdef CONFIG_SCHED_CORE
6057 static inline bool is_task_rq_idle(struct task_struct
*t
)
6059 return (task_rq(t
)->idle
== t
);
6062 static inline bool cookie_equals(struct task_struct
*a
, unsigned long cookie
)
6064 return is_task_rq_idle(a
) || (a
->core_cookie
== cookie
);
6067 static inline bool cookie_match(struct task_struct
*a
, struct task_struct
*b
)
6069 if (is_task_rq_idle(a
) || is_task_rq_idle(b
))
6072 return a
->core_cookie
== b
->core_cookie
;
6075 static inline struct task_struct
*pick_task(struct rq
*rq
)
6077 const struct sched_class
*class;
6078 struct task_struct
*p
;
6080 rq
->dl_server
= NULL
;
6082 for_each_active_class(class) {
6083 p
= class->pick_task(rq
);
6088 BUG(); /* The idle class should always have a runnable task. */
6091 extern void task_vruntime_update(struct rq
*rq
, struct task_struct
*p
, bool in_fi
);
6093 static void queue_core_balance(struct rq
*rq
);
6095 static struct task_struct
*
6096 pick_next_task(struct rq
*rq
, struct task_struct
*prev
, struct rq_flags
*rf
)
6098 struct task_struct
*next
, *p
, *max
= NULL
;
6099 const struct cpumask
*smt_mask
;
6100 bool fi_before
= false;
6101 bool core_clock_updated
= (rq
== rq
->core
);
6102 unsigned long cookie
;
6103 int i
, cpu
, occ
= 0;
6107 if (!sched_core_enabled(rq
))
6108 return __pick_next_task(rq
, prev
, rf
);
6112 /* Stopper task is switching into idle, no need core-wide selection. */
6113 if (cpu_is_offline(cpu
)) {
6115 * Reset core_pick so that we don't enter the fastpath when
6116 * coming online. core_pick would already be migrated to
6117 * another cpu during offline.
6119 rq
->core_pick
= NULL
;
6120 rq
->core_dl_server
= NULL
;
6121 return __pick_next_task(rq
, prev
, rf
);
6125 * If there were no {en,de}queues since we picked (IOW, the task
6126 * pointers are all still valid), and we haven't scheduled the last
6127 * pick yet, do so now.
6129 * rq->core_pick can be NULL if no selection was made for a CPU because
6130 * it was either offline or went offline during a sibling's core-wide
6131 * selection. In this case, do a core-wide selection.
6133 if (rq
->core
->core_pick_seq
== rq
->core
->core_task_seq
&&
6134 rq
->core
->core_pick_seq
!= rq
->core_sched_seq
&&
6136 WRITE_ONCE(rq
->core_sched_seq
, rq
->core
->core_pick_seq
);
6138 next
= rq
->core_pick
;
6139 rq
->dl_server
= rq
->core_dl_server
;
6140 rq
->core_pick
= NULL
;
6141 rq
->core_dl_server
= NULL
;
6145 prev_balance(rq
, prev
, rf
);
6147 smt_mask
= cpu_smt_mask(cpu
);
6148 need_sync
= !!rq
->core
->core_cookie
;
6151 rq
->core
->core_cookie
= 0UL;
6152 if (rq
->core
->core_forceidle_count
) {
6153 if (!core_clock_updated
) {
6154 update_rq_clock(rq
->core
);
6155 core_clock_updated
= true;
6157 sched_core_account_forceidle(rq
);
6158 /* reset after accounting force idle */
6159 rq
->core
->core_forceidle_start
= 0;
6160 rq
->core
->core_forceidle_count
= 0;
6161 rq
->core
->core_forceidle_occupation
= 0;
6167 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6169 * @task_seq guards the task state ({en,de}queues)
6170 * @pick_seq is the @task_seq we did a selection on
6171 * @sched_seq is the @pick_seq we scheduled
6173 * However, preemptions can cause multiple picks on the same task set.
6174 * 'Fix' this by also increasing @task_seq for every pick.
6176 rq
->core
->core_task_seq
++;
6179 * Optimize for common case where this CPU has no cookies
6180 * and there are no cookied tasks running on siblings.
6183 next
= pick_task(rq
);
6184 if (!next
->core_cookie
) {
6185 rq
->core_pick
= NULL
;
6186 rq
->core_dl_server
= NULL
;
6188 * For robustness, update the min_vruntime_fi for
6189 * unconstrained picks as well.
6191 WARN_ON_ONCE(fi_before
);
6192 task_vruntime_update(rq
, next
, false);
6198 * For each thread: do the regular task pick and find the max prio task
6201 * Tie-break prio towards the current CPU
6203 for_each_cpu_wrap(i
, smt_mask
, cpu
) {
6207 * Current cpu always has its clock updated on entrance to
6208 * pick_next_task(). If the current cpu is not the core,
6209 * the core may also have been updated above.
6211 if (i
!= cpu
&& (rq_i
!= rq
->core
|| !core_clock_updated
))
6212 update_rq_clock(rq_i
);
6214 rq_i
->core_pick
= p
= pick_task(rq_i
);
6215 rq_i
->core_dl_server
= rq_i
->dl_server
;
6217 if (!max
|| prio_less(max
, p
, fi_before
))
6221 cookie
= rq
->core
->core_cookie
= max
->core_cookie
;
6224 * For each thread: try and find a runnable task that matches @max or
6227 for_each_cpu(i
, smt_mask
) {
6229 p
= rq_i
->core_pick
;
6231 if (!cookie_equals(p
, cookie
)) {
6234 p
= sched_core_find(rq_i
, cookie
);
6236 p
= idle_sched_class
.pick_task(rq_i
);
6239 rq_i
->core_pick
= p
;
6240 rq_i
->core_dl_server
= NULL
;
6242 if (p
== rq_i
->idle
) {
6243 if (rq_i
->nr_running
) {
6244 rq
->core
->core_forceidle_count
++;
6246 rq
->core
->core_forceidle_seq
++;
6253 if (schedstat_enabled() && rq
->core
->core_forceidle_count
) {
6254 rq
->core
->core_forceidle_start
= rq_clock(rq
->core
);
6255 rq
->core
->core_forceidle_occupation
= occ
;
6258 rq
->core
->core_pick_seq
= rq
->core
->core_task_seq
;
6259 next
= rq
->core_pick
;
6260 rq
->core_sched_seq
= rq
->core
->core_pick_seq
;
6262 /* Something should have been selected for current CPU */
6263 WARN_ON_ONCE(!next
);
6266 * Reschedule siblings
6268 * NOTE: L1TF -- at this point we're no longer running the old task and
6269 * sending an IPI (below) ensures the sibling will no longer be running
6270 * their task. This ensures there is no inter-sibling overlap between
6271 * non-matching user state.
6273 for_each_cpu(i
, smt_mask
) {
6277 * An online sibling might have gone offline before a task
6278 * could be picked for it, or it might be offline but later
 * happen to come online, but it's too late and nothing was
6280 * picked for it. That's Ok - it will pick tasks for itself,
6283 if (!rq_i
->core_pick
)
6287 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6288 * fi_before fi update?
6294 if (!(fi_before
&& rq
->core
->core_forceidle_count
))
6295 task_vruntime_update(rq_i
, rq_i
->core_pick
, !!rq
->core
->core_forceidle_count
);
6297 rq_i
->core_pick
->core_occupation
= occ
;
6300 rq_i
->core_pick
= NULL
;
6301 rq_i
->core_dl_server
= NULL
;
6305 /* Did we break L1TF mitigation requirements? */
6306 WARN_ON_ONCE(!cookie_match(next
, rq_i
->core_pick
));
6308 if (rq_i
->curr
== rq_i
->core_pick
) {
6309 rq_i
->core_pick
= NULL
;
6310 rq_i
->core_dl_server
= NULL
;
6318 put_prev_set_next_task(rq
, prev
, next
);
6319 if (rq
->core
->core_forceidle_count
&& next
== rq
->idle
)
6320 queue_core_balance(rq
);
6325 static bool try_steal_cookie(int this, int that
)
6327 struct rq
*dst
= cpu_rq(this), *src
= cpu_rq(that
);
6328 struct task_struct
*p
;
6329 unsigned long cookie
;
6330 bool success
= false;
6333 guard(double_rq_lock
)(dst
, src
);
6335 cookie
= dst
->core
->core_cookie
;
6339 if (dst
->curr
!= dst
->idle
)
6342 p
= sched_core_find(src
, cookie
);
6347 if (p
== src
->core_pick
|| p
== src
->curr
)
6350 if (!is_cpu_allowed(p
, this))
6353 if (p
->core_occupation
> dst
->idle
->core_occupation
)
6356 * sched_core_find() and sched_core_next() will ensure
6357 * that task @p is not throttled now, we also need to
6358 * check whether the runqueue of the destination CPU is
6361 if (sched_task_is_throttled(p
, this))
6364 move_queued_task_locked(src
, dst
, p
);
6371 p
= sched_core_next(p
, cookie
);
6377 static bool steal_cookie_task(int cpu
, struct sched_domain
*sd
)
6381 for_each_cpu_wrap(i
, sched_domain_span(sd
), cpu
+ 1) {
6388 if (try_steal_cookie(cpu
, i
))
6395 static void sched_core_balance(struct rq
*rq
)
6397 struct sched_domain
*sd
;
6398 int cpu
= cpu_of(rq
);
6403 raw_spin_rq_unlock_irq(rq
);
6404 for_each_domain(cpu
, sd
) {
6408 if (steal_cookie_task(cpu
, sd
))
6411 raw_spin_rq_lock_irq(rq
);
6414 static DEFINE_PER_CPU(struct balance_callback
, core_balance_head
);
6416 static void queue_core_balance(struct rq
*rq
)
6418 if (!sched_core_enabled(rq
))
6421 if (!rq
->core
->core_cookie
)
6424 if (!rq
->nr_running
) /* not forced idle */
6427 queue_balance_callback(rq
, &per_cpu(core_balance_head
, rq
->cpu
), sched_core_balance
);
6430 DEFINE_LOCK_GUARD_1(core_lock
, int,
6431 sched_core_lock(*_T
->lock
, &_T
->flags
),
6432 sched_core_unlock(*_T
->lock
, &_T
->flags
),
6433 unsigned long flags
)
6435 static void sched_core_cpu_starting(unsigned int cpu
)
6437 const struct cpumask
*smt_mask
= cpu_smt_mask(cpu
);
6438 struct rq
*rq
= cpu_rq(cpu
), *core_rq
= NULL
;
6441 guard(core_lock
)(&cpu
);
6443 WARN_ON_ONCE(rq
->core
!= rq
);
6445 /* if we're the first, we'll be our own leader */
6446 if (cpumask_weight(smt_mask
) == 1)
6449 /* find the leader */
6450 for_each_cpu(t
, smt_mask
) {
6454 if (rq
->core
== rq
) {
6460 if (WARN_ON_ONCE(!core_rq
)) /* whoopsie */
6463 /* install and validate core_rq */
6464 for_each_cpu(t
, smt_mask
) {
6470 WARN_ON_ONCE(rq
->core
!= core_rq
);
6474 static void sched_core_cpu_deactivate(unsigned int cpu
)
6476 const struct cpumask
*smt_mask
= cpu_smt_mask(cpu
);
6477 struct rq
*rq
= cpu_rq(cpu
), *core_rq
= NULL
;
6480 guard(core_lock
)(&cpu
);
6482 /* if we're the last man standing, nothing to do */
6483 if (cpumask_weight(smt_mask
) == 1) {
6484 WARN_ON_ONCE(rq
->core
!= rq
);
6488 /* if we're not the leader, nothing to do */
6492 /* find a new leader */
6493 for_each_cpu(t
, smt_mask
) {
6496 core_rq
= cpu_rq(t
);
6500 if (WARN_ON_ONCE(!core_rq
)) /* impossible */
6503 /* copy the shared state to the new leader */
6504 core_rq
->core_task_seq
= rq
->core_task_seq
;
6505 core_rq
->core_pick_seq
= rq
->core_pick_seq
;
6506 core_rq
->core_cookie
= rq
->core_cookie
;
6507 core_rq
->core_forceidle_count
= rq
->core_forceidle_count
;
6508 core_rq
->core_forceidle_seq
= rq
->core_forceidle_seq
;
6509 core_rq
->core_forceidle_occupation
= rq
->core_forceidle_occupation
;
6512 * Accounting edge for forced idle is handled in pick_next_task().
6513 * Don't need another one here, since the hotplug thread shouldn't
6516 core_rq
->core_forceidle_start
= 0;
6518 /* install new leader */
6519 for_each_cpu(t
, smt_mask
) {
6525 static inline void sched_core_cpu_dying(unsigned int cpu
)
6527 struct rq
*rq
= cpu_rq(cpu
);
6533 #else /* !CONFIG_SCHED_CORE */
6535 static inline void sched_core_cpu_starting(unsigned int cpu
) {}
6536 static inline void sched_core_cpu_deactivate(unsigned int cpu
) {}
6537 static inline void sched_core_cpu_dying(unsigned int cpu
) {}
6539 static struct task_struct
*
6540 pick_next_task(struct rq
*rq
, struct task_struct
*prev
, struct rq_flags
*rf
)
6542 return __pick_next_task(rq
, prev
, rf
);
6545 #endif /* CONFIG_SCHED_CORE */
6548 * Constants for the sched_mode argument of __schedule().
6550 * The mode argument allows RT enabled kernels to differentiate a
6551 * preemption from blocking on an 'sleeping' spin/rwlock.
6553 #define SM_IDLE (-1)
6555 #define SM_PREEMPT 1
6556 #define SM_RTLOCK_WAIT 2
6559 * Helper function for __schedule()
6561 * If a task does not have signals pending, deactivate it
6562 * Otherwise marks the task's __state as RUNNING
6564 static bool try_to_block_task(struct rq
*rq
, struct task_struct
*p
,
6565 unsigned long task_state
)
6567 int flags
= DEQUEUE_NOCLOCK
;
6569 if (signal_pending_state(task_state
, p
)) {
6570 WRITE_ONCE(p
->__state
, TASK_RUNNING
);
6574 p
->sched_contributes_to_load
=
6575 (task_state
& TASK_UNINTERRUPTIBLE
) &&
6576 !(task_state
& TASK_NOLOAD
) &&
6577 !(task_state
& TASK_FROZEN
);
6579 if (unlikely(is_special_task_state(task_state
)))
6580 flags
|= DEQUEUE_SPECIAL
;
6583 * __schedule() ttwu()
6584 * prev_state = prev->state; if (p->on_rq && ...)
6585 * if (prev_state) goto out;
6586 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6587 * p->state = TASK_WAKING
6589 * Where __schedule() and ttwu() have matching control dependencies.
6591 * After this, schedule() must not care about p->state any more.
6593 block_task(rq
, p
, flags
);
6598 * __schedule() is the main scheduler function.
6600 * The main means of driving the scheduler and thus entering this function are:
6602 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6604 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6605 * paths. For example, see arch/x86/entry_64.S.
6607 * To drive preemption between tasks, the scheduler sets the flag in timer
6608 * interrupt handler sched_tick().
6610 * 3. Wakeups don't really cause entry into schedule(). They add a
6611 * task to the run-queue and that's it.
6613 * Now, if the new task added to the run-queue preempts the current
6614 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6615 * called on the nearest possible occasion:
6617 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
 * - in syscall or exception context, at the next outermost
6620 * preempt_enable(). (this might be as soon as the wake_up()'s
6623 * - in IRQ context, return from interrupt-handler to
6624 * preemptible context
6626 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6629 * - cond_resched() call
6630 * - explicit schedule() call
6631 * - return from syscall or exception to user-space
6632 * - return from interrupt-handler to user-space
6634 * WARNING: must be called with preemption disabled!
6636 static void __sched notrace
__schedule(int sched_mode
)
6638 struct task_struct
*prev
, *next
;
6640 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6641 * as a preemption by schedule_debug() and RCU.
6643 bool preempt
= sched_mode
> SM_NONE
;
6645 unsigned long *switch_count
;
6646 unsigned long prev_state
;
6651 cpu
= smp_processor_id();
6655 schedule_debug(prev
, preempt
);
6657 if (sched_feat(HRTICK
) || sched_feat(HRTICK_DL
))
6660 local_irq_disable();
6661 rcu_note_context_switch(preempt
);
6664 * Make sure that signal_pending_state()->signal_pending() below
6665 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6666 * done by the caller to avoid the race with signal_wake_up():
6668 * __set_current_state(@state) signal_wake_up()
6669 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6670 * wake_up_state(p, state)
 *   LOCK rq->lock			    LOCK p->pi_lock
6672 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6673 * if (signal_pending_state()) if (p->state & @state)
6675 * Also, the membarrier system call requires a full memory barrier
6676 * after coming from user-space, before storing to rq->curr; this
6677 * barrier matches a full barrier in the proximity of the membarrier
6681 smp_mb__after_spinlock();
6683 /* Promote REQ to ACT */
6684 rq
->clock_update_flags
<<= 1;
6685 update_rq_clock(rq
);
6686 rq
->clock_update_flags
= RQCF_UPDATED
;
6688 switch_count
= &prev
->nivcsw
;
6690 /* Task state changes only considers SM_PREEMPT as preemption */
6691 preempt
= sched_mode
== SM_PREEMPT
;
6694 * We must load prev->state once (task_struct::state is volatile), such
6695 * that we form a control dependency vs deactivate_task() below.
6697 prev_state
= READ_ONCE(prev
->__state
);
6698 if (sched_mode
== SM_IDLE
) {
6699 /* SCX must consult the BPF scheduler to tell if rq is empty */
6700 if (!rq
->nr_running
&& !scx_enabled()) {
6704 } else if (!preempt
&& prev_state
) {
6705 block
= try_to_block_task(rq
, prev
, prev_state
);
6706 switch_count
= &prev
->nvcsw
;
6709 next
= pick_next_task(rq
, prev
, &rf
);
6710 rq_set_donor(rq
, next
);
6712 clear_tsk_need_resched(prev
);
6713 clear_preempt_need_resched();
6714 #ifdef CONFIG_SCHED_DEBUG
6715 rq
->last_seen_need_resched_ns
= 0;
6718 if (likely(prev
!= next
)) {
6721 * RCU users of rcu_dereference(rq->curr) may not see
6722 * changes to task_struct made by pick_next_task().
6724 RCU_INIT_POINTER(rq
->curr
, next
);
6726 * The membarrier system call requires each architecture
6727 * to have a full memory barrier after updating
6728 * rq->curr, before returning to user-space.
6730 * Here are the schemes providing that barrier on the
6731 * various architectures:
6732 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6733 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6734 * on PowerPC and on RISC-V.
6735 * - finish_lock_switch() for weakly-ordered
6736 * architectures where spin_unlock is a full barrier,
6737 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6738 * is a RELEASE barrier),
6740 * The barrier matches a full barrier in the proximity of
6741 * the membarrier system call entry.
6743 * On RISC-V, this barrier pairing is also needed for the
6744 * SYNC_CORE command when switching between processes, cf.
6745 * the inline comments in membarrier_arch_switch_mm().
6749 migrate_disable_switch(rq
, prev
);
6750 psi_account_irqtime(rq
, prev
, next
);
6751 psi_sched_switch(prev
, next
, block
);
6753 trace_sched_switch(preempt
, prev
, next
, prev_state
);
6755 /* Also unlocks the rq: */
6756 rq
= context_switch(rq
, prev
, next
, &rf
);
6758 rq_unpin_lock(rq
, &rf
);
6759 __balance_callbacks(rq
);
6760 raw_spin_rq_unlock_irq(rq
);
void __noreturn do_task_dead(void)
{
	/* Causes final put_task_struct in finish_task_switch(): */
	set_special_state(TASK_DEAD);

	/* Tell freezer to ignore us: */
	current->flags |= PF_NOFREEZE;

	__schedule(SM_NONE);
	BUG();

	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
	for (;;)
		cpu_relax();
}
static inline void sched_submit_work(struct task_struct *tsk)
{
	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
	unsigned int task_flags;

	/*
	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
	 * will use a blocking primitive -- which would lead to recursion.
	 */
	lock_map_acquire_try(&sched_map);

	task_flags = tsk->flags;
	/*
	 * If a worker goes to sleep, notify and ask workqueue whether it
	 * wants to wake up a task to maintain concurrency.
	 */
	if (task_flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);
	else if (task_flags & PF_IO_WORKER)
		io_wq_worker_sleeping(tsk);

	/*
	 * spinlock and rwlock must not flush block requests. This will
	 * deadlock if the callback attempts to acquire a lock which is
	 * already acquired.
	 */
	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);

	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	blk_flush_plug(tsk->plug, true);

	lock_map_release(&sched_map);
}
static void sched_update_worker(struct task_struct *tsk)
{
	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
		if (tsk->flags & PF_BLOCK_TS)
			blk_plug_invalidate_ts(tsk);
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);
		else if (tsk->flags & PF_IO_WORKER)
			io_wq_worker_running(tsk);
	}
}
static __always_inline void __schedule_loop(int sched_mode)
{
	do {
		preempt_disable();
		__schedule(sched_mode);
		sched_preempt_enable_no_resched();
	} while (need_resched());
}
asmlinkage __visible void __sched schedule(void)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_RT_MUTEXES
	lockdep_assert(!tsk->sched_rt_mutex);
#endif

	if (!task_is_running(tsk))
		sched_submit_work(tsk);
	__schedule_loop(SM_NONE);
	sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
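/*
 * Illustrative sketch (not part of this file): the canonical way callers
 * block via schedule() is to publish a sleep state first and then re-check
 * the wakeup condition, so a concurrent wake_up_process() cannot be lost:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)				// hypothetical condition
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The waker side does "condition = true; wake_up_process(task);". The
 * ordering relied upon here against signal_wake_up() is the
 * smp_mb__after_spinlock() documented in __schedule() above.
 */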
/*
 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
 * state (have scheduled out non-voluntarily) by making sure that all
 * tasks have either left the run queue or have gone into user space.
 * As idle tasks do not do either, they must not ever be preempted
 * (schedule out non-voluntarily).
 *
 * schedule_idle() is similar to schedule_preempt_disable() except that it
 * never enables preemption because it does not call sched_submit_work().
 */
void __sched schedule_idle(void)
{
	/*
	 * As this skips calling sched_submit_work(), which the idle task does
	 * regardless because that function is a NOP when the task is in a
	 * TASK_RUNNING state, make sure this isn't used someplace that the
	 * current task can be in any other state. Note, idle is always in the
	 * TASK_RUNNING state.
	 */
	WARN_ON_ONCE(current->__state);
	do {
		__schedule(SM_IDLE);
	} while (need_resched());
}
#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
asmlinkage __visible void __sched schedule_user(void)
{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 *
	 * NB: There are buggy callers of this function. Ideally we
	 * should warn if prev_state != CT_STATE_USER, but that will trigger
	 * too frequently to make sense yet.
	 */
	enum ctx_state prev_state = exception_enter();
	schedule();
	exception_exit(prev_state);
}
#endif
/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}
#ifdef CONFIG_PREEMPT_RT
void __sched notrace schedule_rtlock(void)
{
	__schedule_loop(SM_RTLOCK_WAIT);
}
NOKPROBE_SYMBOL(schedule_rtlock);
#endif
static void __sched notrace preempt_schedule_common(void)
{
	do {
		/*
		 * Because the function tracer can trace preempt_count_sub()
		 * and it also uses preempt_enable/disable_notrace(), if
		 * NEED_RESCHED is set, the preempt_enable_notrace() called
		 * by the function tracer will call this function again and
		 * cause infinite recursion.
		 *
		 * Preemption must be disabled here before the function
		 * tracer can trace. Break up preempt_disable() into two
		 * calls. One to disable preemption without fear of being
		 * traced. The other to still record the preemption latency,
		 * which can also be traced by the function tracer.
		 */
		preempt_disable_notrace();
		preempt_latency_start(1);
		__schedule(SM_PREEMPT);
		preempt_latency_stop(1);
		preempt_enable_no_resched_notrace();

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
	} while (need_resched());
}
#ifdef CONFIG_PREEMPTION
/*
 * This is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable.
 */
asmlinkage __visible void __sched notrace preempt_schedule(void)
{
	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(!preemptible()))
		return;

	preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#ifndef preempt_schedule_dynamic_enabled
#define preempt_schedule_dynamic_enabled	preempt_schedule
#define preempt_schedule_dynamic_disabled	NULL
#endif
DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
void __sched notrace dynamic_preempt_schedule(void)
{
	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
		return;
	preempt_schedule();
}
NOKPROBE_SYMBOL(dynamic_preempt_schedule);
EXPORT_SYMBOL(dynamic_preempt_schedule);
#endif
#endif
/**
 * preempt_schedule_notrace - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
{
	enum ctx_state prev_ctx;

	if (likely(!preemptible()))
		return;

	do {
		/*
		 * Because the function tracer can trace preempt_count_sub()
		 * and it also uses preempt_enable/disable_notrace(), if
		 * NEED_RESCHED is set, the preempt_enable_notrace() called
		 * by the function tracer will call this function again and
		 * cause infinite recursion.
		 *
		 * Preemption must be disabled here before the function
		 * tracer can trace. Break up preempt_disable() into two
		 * calls. One to disable preemption without fear of being
		 * traced. The other to still record the preemption latency,
		 * which can also be traced by the function tracer.
		 */
		preempt_disable_notrace();
		preempt_latency_start(1);
		/*
		 * Needs preempt disabled in case user_exit() is traced
		 * and the tracer calls preempt_enable_notrace() causing
		 * an infinite recursion.
		 */
		prev_ctx = exception_enter();
		__schedule(SM_PREEMPT);
		exception_exit(prev_ctx);

		preempt_latency_stop(1);
		preempt_enable_no_resched_notrace();
	} while (need_resched());
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#ifndef preempt_schedule_notrace_dynamic_enabled
#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
#define preempt_schedule_notrace_dynamic_disabled	NULL
#endif
DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
void __sched notrace dynamic_preempt_schedule_notrace(void)
{
	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
		return;
	preempt_schedule_notrace();
}
NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
#endif
#endif

#endif /* CONFIG_PREEMPTION */
/*
 * This is the entry point to schedule() from kernel preemption
 * off of IRQ context.
 * Note, that this is called and return with IRQs disabled. This will
 * protect us against recursive calling from IRQ contexts.
 */
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
	enum ctx_state prev_state;

	/* Catch callers which need to be fixed */
	BUG_ON(preempt_count() || !irqs_disabled());

	prev_state = exception_enter();

	do {
		preempt_disable();
		local_irq_enable();
		__schedule(SM_PREEMPT);
		local_irq_disable();
		sched_preempt_enable_no_resched();
	} while (need_resched());

	exception_exit(prev_state);
}
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
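/*
 * Illustrative sketch (not part of this file): default_wake_function() is
 * the ->func callback installed by init_waitqueue_entry(), so a plain
 * waiter on a wait queue reaches try_to_wake_up() through it:
 *
 *	DECLARE_WAIT_QUEUE_HEAD(wq);
 *	wait_queue_entry_t wait;
 *
 *	init_waitqueue_entry(&wait, current);	// ->func = default_wake_function
 *	add_wait_queue(&wq, &wait);
 *	...
 *	wake_up(&wq);	// __wake_up() -> default_wake_function() -> try_to_wake_up()
 */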
const struct sched_class *__setscheduler_class(int policy, int prio)
{
	if (dl_prio(prio))
		return &dl_sched_class;

	if (rt_prio(prio))
		return &rt_sched_class;

#ifdef CONFIG_SCHED_CLASS_EXT
	if (task_should_scx(policy))
		return &ext_sched_class;
#endif

	return &fair_sched_class;
}
#ifdef CONFIG_RT_MUTEXES

/*
 * Would be more useful with typeof()/auto_type but they don't mix with
 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
 * name such that if someone were to implement this function we get to compare
 * notes.
 */
#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
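/*
 * Illustrative sketch (not part of this file): fetch_and_set() evaluates to
 * the old value and stores the new one, e.g.:
 *
 *	int flag = 1;
 *	int old = fetch_and_set(flag, 0);	// old == 1, flag == 0
 *
 * which is what the lockdep_assert()s below rely on to verify that the
 * sched_rt_mutex flag toggles 0 -> 1 -> 0 as expected.
 */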
void rt_mutex_pre_schedule(void)
{
	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
	sched_submit_work(current);
}

void rt_mutex_schedule(void)
{
	lockdep_assert(current->sched_rt_mutex);
	__schedule_loop(SM_NONE);
}

void rt_mutex_post_schedule(void)
{
	sched_update_worker(current);
	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
}
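/*
 * Illustrative sketch (not part of this file): the rt_mutex slow path is
 * expected to bracket its blocking section with these helpers, roughly:
 *
 *	rt_mutex_pre_schedule();	// flush plugged I/O, notify workqueue
 *	...
 *	rt_mutex_schedule();		// possibly called several times
 *	...
 *	rt_mutex_post_schedule();	// re-arm the worker bookkeeping
 *
 * so that sched_submit_work()/sched_update_worker() run once per lock
 * operation instead of once per schedule() call.
 */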
/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task to boost
 * @pi_task: donor task
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance
 * logic. Call site only calls if the priority of the task changed.
 */
void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
{
	int prio, oldprio, queued, running, queue_flag =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	const struct sched_class *prev_class, *next_class;
	struct rq_flags rf;
	struct rq *rq;

	/* XXX used to be waiter->prio, not waiter->task->prio */
	prio = __rt_effective_prio(pi_task, p->normal_prio);

	/*
	 * If nothing changed; bail early.
	 */
	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
		return;

	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
	/*
	 * Set under pi_lock && rq->lock, such that the value can be used under
	 * either lock.
	 *
	 * Note that there is loads of tricky to make this pointer cache work
	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
	 * ensure a task is de-boosted (pi_task is set to NULL) before the
	 * task is allowed to run again (and can exit). This ensures the pointer
	 * points to a blocked task -- which guarantees the task is present.
	 */
	p->pi_top_task = pi_task;

	/*
	 * For FIFO/RR we only need to set prio, if that matches we're done.
	 */
	if (prio == p->prio && !dl_prio(prio))
		goto out_unlock;

	/*
	 * Idle task boosting is a no-no in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, pi_task);
	oldprio = p->prio;

	if (oldprio == prio)
		queue_flag &= ~DEQUEUE_MOVE;

	prev_class = p->sched_class;
	next_class = __setscheduler_class(p->policy, prio);

	if (prev_class != next_class && p->se.sched_delayed)
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flag);
	if (running)
		put_prev_task(rq, p);

	/*
	 * Boosting condition are:
	 * 1. -rt task is running and holds mutex A
	 *      --> -dl task blocks on mutex A
	 *
	 * 2. -dl task is running and holds mutex A
	 *      --> -dl task blocks on mutex A and could preempt the
	 *          running task
	 */
	if (dl_prio(prio)) {
		if (!dl_prio(p->normal_prio) ||
		    (pi_task && dl_prio(pi_task->prio) &&
		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
			p->dl.pi_se = pi_task->dl.pi_se;
			queue_flag |= ENQUEUE_REPLENISH;
		} else {
			p->dl.pi_se = &p->dl;
		}
	} else if (rt_prio(prio)) {
		if (dl_prio(oldprio))
			p->dl.pi_se = &p->dl;
		if (oldprio < prio)
			queue_flag |= ENQUEUE_HEAD;
	} else {
		if (dl_prio(oldprio))
			p->dl.pi_se = &p->dl;
		if (rt_prio(oldprio))
			p->rt.timeout = 0;
	}

	p->sched_class = next_class;
	p->prio = prio;

	check_class_changing(rq, p, prev_class);

	if (queued)
		enqueue_task(rq, p, queue_flag);
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	/* Avoid rq from going away on us: */
	preempt_disable();

	rq_unpin_lock(rq, &rf);
	__balance_callbacks(rq);
	raw_spin_rq_unlock(rq);

	preempt_enable();
}
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
	if (should_resched(0)) {
		preempt_schedule_common();
		return 1;
	}
	/*
	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
	 * whether the current CPU is in an RCU read-side critical section,
	 * so the tick can report quiescent states even for CPUs looping
	 * in kernel context. In contrast, in non-preemptible kernels,
	 * RCU readers leave no in-memory hints, which means that CPU-bound
	 * processes executing in kernel context might never report an
	 * RCU quiescent state. Therefore, the following code causes
	 * cond_resched() to report a quiescent state, but only when RCU
	 * is in urgent need of one.
	 */
#ifndef CONFIG_PREEMPT_RCU
	rcu_all_qs();
#endif
	return 0;
}
EXPORT_SYMBOL(__cond_resched);
#endif
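/*
 * Illustrative sketch (not part of this file): long-running kernel loops
 * insert cond_resched() so that, on non-preemptible configurations, other
 * tasks (and RCU) still make progress:
 *
 *	list_for_each_entry(obj, &list, node) {
 *		process(obj);		// hypothetical per-item work
 *		cond_resched();
 *	}
 *
 * Under PREEMPT_DYNAMIC "full", the static call below collapses this into
 * a no-op that returns 0.
 */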
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define cond_resched_dynamic_enabled	__cond_resched
#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(cond_resched);

#define might_resched_dynamic_enabled	__cond_resched
#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
int __sched dynamic_cond_resched(void)
{
	klp_sched_try_switch();
	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
		return 0;
	return __cond_resched();
}
EXPORT_SYMBOL(dynamic_cond_resched);

static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
int __sched dynamic_might_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_might_resched))
		return 0;
	return __cond_resched();
}
EXPORT_SYMBOL(dynamic_might_resched);
#endif
#endif
/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
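/*
 * Illustrative sketch (not part of this file): cond_resched_lock() is meant
 * for scans that would otherwise hold a spinlock for a long time:
 *
 *	spin_lock(&lock);
 *	list_for_each_entry(obj, &list, node) {
 *		examine(obj);			// hypothetical helper
 *		cond_resched_lock(&lock);	// may drop and re-take 'lock'
 *	}
 *	spin_unlock(&lock);
 *
 * Callers must not rely on the protected structure staying stable across
 * the call, since the lock can be released in the middle of the walk.
 */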
int __cond_resched_rwlock_read(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_read(lock);

	if (rwlock_needbreak(lock) || resched) {
		read_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		read_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_read);

int __cond_resched_rwlock_write(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_write(lock);

	if (rwlock_needbreak(lock) || resched) {
		write_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		write_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);
#ifdef CONFIG_PREEMPT_DYNAMIC

#ifdef CONFIG_GENERIC_ENTRY
#include <linux/entry-common.h>
#endif

/*
 * SC:preempt_schedule
 * SC:preempt_schedule_notrace
 * SC:irqentry_exit_cond_resched
 *
 * NONE:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- RET0
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *   dynamic_preempt_lazy       <- false
 *
 * VOLUNTARY:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- __cond_resched
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *   dynamic_preempt_lazy       <- false
 *
 * FULL:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 *   dynamic_preempt_lazy       <- false
 *
 * LAZY:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 *   dynamic_preempt_lazy       <- true
 */

enum {
	preempt_dynamic_undefined = -1,
	preempt_dynamic_none,
	preempt_dynamic_voluntary,
	preempt_dynamic_full,
	preempt_dynamic_lazy,
};

int preempt_dynamic_mode = preempt_dynamic_undefined;
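/*
 * Illustrative sketch (not part of this file): the mode above is normally
 * selected via the "preempt=" boot parameter handled further down, e.g.
 * booting with
 *
 *	preempt=full
 *
 * ends up in sched_dynamic_update(preempt_dynamic_full). On SCHED_DEBUG
 * kernels the mode can also be flipped at runtime through debugfs
 * (conventionally /sys/kernel/debug/sched/preempt); that write path lives
 * in debug.c and likewise calls sched_dynamic_update().
 */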
int sched_dynamic_mode(const char *str)
{
#ifndef CONFIG_PREEMPT_RT
	if (!strcmp(str, "none"))
		return preempt_dynamic_none;

	if (!strcmp(str, "voluntary"))
		return preempt_dynamic_voluntary;
#endif

	if (!strcmp(str, "full"))
		return preempt_dynamic_full;

#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
	if (!strcmp(str, "lazy"))
		return preempt_dynamic_lazy;
#endif

	return -EINVAL;
}
#define preempt_dynamic_key_enable(f)	static_key_enable(&sk_dynamic_##f.key)
#define preempt_dynamic_key_disable(f)	static_key_disable(&sk_dynamic_##f.key)

#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
#define preempt_dynamic_enable(f)	preempt_dynamic_key_enable(f)
#define preempt_dynamic_disable(f)	preempt_dynamic_key_disable(f)
#else
#error "Unsupported PREEMPT_DYNAMIC mechanism"
#endif

static DEFINE_MUTEX(sched_dynamic_mutex);
static bool klp_override;
static void __sched_dynamic_update(int mode)
{
	/*
	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
	 * the ZERO state, which is invalid.
	 */
	if (!klp_override)
		preempt_dynamic_enable(cond_resched);
	preempt_dynamic_enable(might_resched);
	preempt_dynamic_enable(preempt_schedule);
	preempt_dynamic_enable(preempt_schedule_notrace);
	preempt_dynamic_enable(irqentry_exit_cond_resched);
	preempt_dynamic_key_disable(preempt_lazy);

	switch (mode) {
	case preempt_dynamic_none:
		if (!klp_override)
			preempt_dynamic_enable(cond_resched);
		preempt_dynamic_disable(might_resched);
		preempt_dynamic_disable(preempt_schedule);
		preempt_dynamic_disable(preempt_schedule_notrace);
		preempt_dynamic_disable(irqentry_exit_cond_resched);
		preempt_dynamic_key_disable(preempt_lazy);
		if (mode != preempt_dynamic_mode)
			pr_info("Dynamic Preempt: none\n");
		break;

	case preempt_dynamic_voluntary:
		if (!klp_override)
			preempt_dynamic_enable(cond_resched);
		preempt_dynamic_enable(might_resched);
		preempt_dynamic_disable(preempt_schedule);
		preempt_dynamic_disable(preempt_schedule_notrace);
		preempt_dynamic_disable(irqentry_exit_cond_resched);
		preempt_dynamic_key_disable(preempt_lazy);
		if (mode != preempt_dynamic_mode)
			pr_info("Dynamic Preempt: voluntary\n");
		break;

	case preempt_dynamic_full:
		if (!klp_override)
			preempt_dynamic_disable(cond_resched);
		preempt_dynamic_disable(might_resched);
		preempt_dynamic_enable(preempt_schedule);
		preempt_dynamic_enable(preempt_schedule_notrace);
		preempt_dynamic_enable(irqentry_exit_cond_resched);
		preempt_dynamic_key_disable(preempt_lazy);
		if (mode != preempt_dynamic_mode)
			pr_info("Dynamic Preempt: full\n");
		break;

	case preempt_dynamic_lazy:
		if (!klp_override)
			preempt_dynamic_disable(cond_resched);
		preempt_dynamic_disable(might_resched);
		preempt_dynamic_enable(preempt_schedule);
		preempt_dynamic_enable(preempt_schedule_notrace);
		preempt_dynamic_enable(irqentry_exit_cond_resched);
		preempt_dynamic_key_enable(preempt_lazy);
		if (mode != preempt_dynamic_mode)
			pr_info("Dynamic Preempt: lazy\n");
		break;
	}

	preempt_dynamic_mode = mode;
}
void sched_dynamic_update(int mode)
{
	mutex_lock(&sched_dynamic_mutex);
	__sched_dynamic_update(mode);
	mutex_unlock(&sched_dynamic_mutex);
}
#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL

static int klp_cond_resched(void)
{
	__klp_sched_try_switch();
	return __cond_resched();
}

void sched_dynamic_klp_enable(void)
{
	mutex_lock(&sched_dynamic_mutex);

	klp_override = true;
	static_call_update(cond_resched, klp_cond_resched);

	mutex_unlock(&sched_dynamic_mutex);
}

void sched_dynamic_klp_disable(void)
{
	mutex_lock(&sched_dynamic_mutex);

	klp_override = false;
	__sched_dynamic_update(preempt_dynamic_mode);

	mutex_unlock(&sched_dynamic_mutex);
}

#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
static int __init setup_preempt_mode(char *str)
{
	int mode = sched_dynamic_mode(str);
	if (mode < 0) {
		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
		return 0;
	}

	sched_dynamic_update(mode);
	return 1;
}
__setup("preempt=", setup_preempt_mode);
static void __init preempt_dynamic_init(void)
{
	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
			sched_dynamic_update(preempt_dynamic_none);
		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
			sched_dynamic_update(preempt_dynamic_voluntary);
		} else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
			sched_dynamic_update(preempt_dynamic_lazy);
		} else {
			/* Default static call setting, nothing to do */
			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
			preempt_dynamic_mode = preempt_dynamic_full;
			pr_info("Dynamic Preempt: full\n");
		}
	}
}
#define PREEMPT_MODEL_ACCESSOR(mode) \
	bool preempt_model_##mode(void)						 \
	{									 \
		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
	}									 \
	EXPORT_SYMBOL_GPL(preempt_model_##mode)

PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);
PREEMPT_MODEL_ACCESSOR(lazy);
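/*
 * Illustrative sketch (not part of this file): PREEMPT_MODEL_ACCESSOR(full),
 * for example, expands to roughly
 *
 *	bool preempt_model_full(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_full;
 *	}
 *	EXPORT_SYMBOL_GPL(preempt_model_full);
 *
 * letting code elsewhere query which preemption model is currently active.
 */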
#else /* !CONFIG_PREEMPT_DYNAMIC: */

static inline void preempt_dynamic_init(void) { }

#endif /* CONFIG_PREEMPT_DYNAMIC */
int io_schedule_prepare(void)
{
	int old_iowait = current->in_iowait;

	current->in_iowait = 1;
	blk_flush_plug(current->plug, true);
	return old_iowait;
}

void io_schedule_finish(int token)
{
	current->in_iowait = token;
}

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
long __sched io_schedule_timeout(long timeout)
{
	int token;
	long ret;

	token = io_schedule_prepare();
	ret = schedule_timeout(timeout);
	io_schedule_finish(token);

	return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);

void __sched io_schedule(void)
{
	int token;

	token = io_schedule_prepare();
	schedule();
	io_schedule_finish(token);
}
EXPORT_SYMBOL(io_schedule);
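/*
 * Illustrative sketch (not part of this file): callers that block in their
 * own way but still want the sleep accounted as I/O wait can use the
 * prepare/finish pair around their wait, e.g.:
 *
 *	int token = io_schedule_prepare();
 *	wait_for_completion(&done);	// hypothetical wait for an I/O event
 *	io_schedule_finish(token);
 *
 * which is the pattern behind helpers such as mutex_lock_io().
 */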
void sched_show_task(struct task_struct *p)
{
	unsigned long free;
	int ppid;

	if (!try_get_task_stack(p))
		return;

	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));

	if (task_is_running(p))
		pr_cont("  running task    ");
	free = stack_not_used(p);
	ppid = 0;
	rcu_read_lock();
	if (pid_alive(p))
		ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
		free, task_pid_nr(p), task_tgid_nr(p),
		ppid, read_task_thread_flags(p));

	print_worker_info(KERN_INFO, p);
	print_stop_info(KERN_INFO, p);
	print_scx_info(KERN_INFO, p);
	show_stack(p, NULL, KERN_INFO);
	put_task_stack(p);
}
EXPORT_SYMBOL_GPL(sched_show_task);
static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
{
	unsigned int state = READ_ONCE(p->__state);

	/* no filter, everything matches */
	if (!state_filter)
		return true;

	/* filter, but doesn't match */
	if (!(state & state_filter))
		return false;

	/*
	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
	 * TASK_KILLABLE).
	 */
	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
		return false;

	return true;
}
void show_state_filter(unsigned int state_filter)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 * Also, reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		if (state_filter_match(state_filter, p))
			sched_show_task(p);
	}
	rcu_read_unlock();

#ifdef CONFIG_SCHED_DEBUG
	if (!state_filter)
		sysrq_sched_debug_show();
#endif

	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}
/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: CPU the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __init init_idle(struct task_struct *idle, int cpu)
{
#ifdef CONFIG_SMP
	struct affinity_context ac = (struct affinity_context) {
		.new_mask = cpumask_of(cpu),
	};
#endif
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&idle->pi_lock, flags);
	raw_spin_rq_lock(rq);

	idle->__state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();
	/*
	 * PF_KTHREAD should already be set at this point; regardless, make it
	 * look like a proper per-CPU kthread.
	 */
	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
	kthread_set_per_cpu(idle, cpu);

#ifdef CONFIG_SMP
	/*
	 * No validation and serialization required at boot time and for
	 * setting up the idle tasks of not yet online CPUs.
	 */
	set_cpus_allowed_common(idle, &ac);
#endif
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the CPU isn't yet set to this CPU so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->idle = idle;
	rq_set_donor(rq, idle);
	rcu_assign_pointer(rq->curr, idle);
	idle->on_rq = TASK_ON_RQ_QUEUED;
#ifdef CONFIG_SMP
	idle->on_cpu = 1;
#endif
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	init_idle_preempt_count(idle, cpu);

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
	vtime_init_idle(idle, cpu);
#ifdef CONFIG_SMP
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}
int cpuset_cpumask_can_shrink(const struct cpumask *cur,
			      const struct cpumask *trial)
{
	int ret = 1;

	if (cpumask_empty(cur))
		return ret;

	ret = dl_cpuset_cpumask_can_shrink(cur, trial);

	return ret;
}

int task_can_attach(struct task_struct *p)
{
	int ret = 0;

	/*
	 * Kthreads which disallow setaffinity shouldn't be moved
	 * to a new cpuset; we don't want to change their CPU
	 * affinity and isolating such threads by their set of
	 * allowed nodes is unnecessary. Thus, cpusets are not
	 * applicable for such threads. This prevents checking for
	 * success of set_cpus_allowed_ptr() on all attached tasks
	 * before cpus_mask may be changed.
	 */
	if (p->flags & PF_NO_SETAFFINITY)
		ret = -EINVAL;

	return ret;
}
bool sched_smp_initialized __read_mostly;

#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
	struct migration_arg arg = { p, target_cpu };
	int curr_cpu = task_cpu(p);

	if (curr_cpu == target_cpu)
		return 0;

	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
		return -EINVAL;

	/* TODO: This is not properly updating schedstats */

	trace_sched_move_numa(p, curr_cpu, target_cpu);
	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}

/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues
 */
void sched_setnuma(struct task_struct *p, int nid)
{
	bool queued, running;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	p->numa_preferred_nid = nid;

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
	task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Ensure that the idle task is using init_mm right before its CPU goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));
	BUG_ON(current != this_rq()->idle);

	if (mm != &init_mm) {
		switch_mm(mm, &init_mm, current);
		finish_arch_post_lock_switch();
	}

	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}
static int __balance_push_cpu_stop(void *arg)
{
	struct task_struct *p = arg;
	struct rq *rq = this_rq();
	struct rq_flags rf;
	int cpu;

	raw_spin_lock_irq(&p->pi_lock);
	rq_lock(rq, &rf);

	update_rq_clock(rq);

	if (task_rq(p) == rq && task_on_rq_queued(p)) {
		cpu = select_fallback_rq(rq->cpu, p);
		rq = __migrate_task(rq, &rf, p, cpu);
	}

	rq_unlock(rq, &rf);
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
/*
 * Ensure we only run per-cpu kthreads once the CPU goes !active.
 *
 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
 * effective when the hotplug motion is down.
 */
static void balance_push(struct rq *rq)
{
	struct task_struct *push_task = rq->curr;

	lockdep_assert_rq_held(rq);

	/*
	 * Ensure the thing is persistent until balance_push_set(.on = false);
	 */
	rq->balance_callback = &balance_push_callback;

	/*
	 * Only active while going offline and when invoked on the outgoing
	 * CPU.
	 */
	if (!cpu_dying(rq->cpu) || rq != this_rq())
		return;

	/*
	 * Both the cpu-hotplug and stop task are in this case and are
	 * required to complete the hotplug process.
	 */
	if (kthread_is_per_cpu(push_task) ||
	    is_migration_disabled(push_task)) {

		/*
		 * If this is the idle task on the outgoing CPU try to wake
		 * up the hotplug control thread which might wait for the
		 * last task to vanish. The rcuwait_active() check is
		 * accurate here because the waiter is pinned on this CPU
		 * and can't obviously be running in parallel.
		 *
		 * On RT kernels this also has to check whether there are
		 * pinned and scheduled out tasks on the runqueue. They
		 * need to leave the migrate disabled section first.
		 */
		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
		    rcuwait_active(&rq->hotplug_wait)) {
			raw_spin_rq_unlock(rq);
			rcuwait_wake_up(&rq->hotplug_wait);
			raw_spin_rq_lock(rq);
		}
		return;
	}

	get_task_struct(push_task);
	/*
	 * Temporarily drop rq->lock such that we can wake-up the stop task.
	 * Both preemption and IRQs are still disabled.
	 */
	preempt_disable();
	raw_spin_rq_unlock(rq);
	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
			    this_cpu_ptr(&push_work));
	preempt_enable();
	/*
	 * At this point need_resched() is true and we'll take the loop in
	 * schedule(). The next pick is obviously going to be the stop task
	 * which kthread_is_per_cpu() and will push this task away.
	 */
	raw_spin_rq_lock(rq);
}
static void balance_push_set(int cpu, bool on)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (on) {
		WARN_ON_ONCE(rq->balance_callback);
		rq->balance_callback = &balance_push_callback;
	} else if (rq->balance_callback == &balance_push_callback) {
		rq->balance_callback = NULL;
	}
	rq_unlock_irqrestore(rq, &rf);
}
/*
 * Invoked from a CPUs hotplug control thread after the CPU has been marked
 * inactive. All tasks which are not per CPU kernel threads are either
 * pushed off this CPU now via balance_push() or placed on a different CPU
 * during wakeup. Wait until the CPU is quiescent.
 */
static void balance_hotplug_wait(void)
{
	struct rq *rq = this_rq();

	rcuwait_wait_event(&rq->hotplug_wait,
			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
			   TASK_UNINTERRUPTIBLE);
}

#else

static inline void balance_push(struct rq *rq)
{
}

static inline void balance_push_set(int cpu, bool on)
{
}

static inline void balance_hotplug_wait(void)
{
}

#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		update_rq_clock(rq);
		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}
static inline void sched_set_rq_online(struct rq *rq, int cpu)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_online(rq);
	}
	rq_unlock_irqrestore(rq, &rf);
}

static inline void sched_set_rq_offline(struct rq *rq, int cpu)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_offline(rq);
	}
	rq_unlock_irqrestore(rq, &rf);
}
/*
 * used to mark begin/end of suspend/resume:
 */
static int num_cpus_frozen;

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 */
static void cpuset_cpu_active(void)
{
	if (cpuhp_tasks_frozen) {
		/*
		 * num_cpus_frozen tracks how many CPUs are involved in suspend
		 * resume sequence. As long as this is not the last online
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		partition_sched_domains(1, NULL, NULL);
		if (--num_cpus_frozen)
			return;
		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();
}

static int cpuset_cpu_inactive(unsigned int cpu)
{
	if (!cpuhp_tasks_frozen) {
		int ret = dl_bw_check_overflow(cpu);

		if (ret)
			return ret;
		cpuset_update_active_cpus();
	} else {
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
	}
	return 0;
}
static inline void sched_smt_present_inc(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_inc_cpuslocked(&sched_smt_present);
#endif
}

static inline void sched_smt_present_dec(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_dec_cpuslocked(&sched_smt_present);
#endif
}
int sched_cpu_activate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/*
	 * Clear the balance_push callback and prepare to schedule
	 * regular tasks.
	 */
	balance_push_set(cpu, false);

	/*
	 * When going up, increment the number of cores with SMT present.
	 */
	sched_smt_present_inc(cpu);
	set_cpu_active(cpu, true);

	if (sched_smp_initialized) {
		sched_update_numa(cpu, true);
		sched_domains_numa_masks_set(cpu);
		cpuset_cpu_active();
	}

	scx_rq_activate(rq);

	/*
	 * Put the rq online, if not already. This happens:
	 *
	 * 1) In the early boot process, because we build the real domains
	 *    after all CPUs have been brought up.
	 *
	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
	 *    domains.
	 */
	sched_set_rq_online(rq, cpu);

	return 0;
}
int sched_cpu_deactivate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	int ret;

	/*
	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
	 * load balancing when not active
	 */
	nohz_balance_exit_idle(rq);

	set_cpu_active(cpu, false);

	/*
	 * From this point forward, this CPU will refuse to run any task that
	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
	 * push those tasks away until this gets cleared, see
	 * sched_cpu_dying().
	 */
	balance_push_set(cpu, true);

	/*
	 * We've cleared cpu_active_mask / set balance_push, wait for all
	 * preempt-disabled and RCU users of this state to go away such that
	 * all new such users will observe it.
	 *
	 * Specifically, we rely on ttwu to no longer target this CPU, see
	 * ttwu_queue_cond() and is_cpu_allowed().
	 *
	 * Do sync before park smpboot threads to take care the RCU boost case.
	 */
	synchronize_rcu();

	sched_set_rq_offline(rq, cpu);

	scx_rq_deactivate(rq);

	/*
	 * When going down, decrement the number of cores with SMT present.
	 */
	sched_smt_present_dec(cpu);

#ifdef CONFIG_SCHED_SMT
	sched_core_cpu_deactivate(cpu);
#endif

	if (!sched_smp_initialized)
		return 0;

	sched_update_numa(cpu, false);
	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		sched_smt_present_inc(cpu);
		sched_set_rq_online(rq, cpu);
		balance_push_set(cpu, false);
		set_cpu_active(cpu, true);
		sched_update_numa(cpu, true);
		return ret;
	}
	sched_domains_numa_masks_clear(cpu);

	return 0;
}
static void sched_rq_cpu_starting(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	rq->calc_load_update = calc_load_update;
	update_max_interval();
}

int sched_cpu_starting(unsigned int cpu)
{
	sched_core_cpu_starting(cpu);
	sched_rq_cpu_starting(cpu);
	sched_tick_start(cpu);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Invoked immediately before the stopper thread is invoked to bring the
 * CPU down completely. At this point all per CPU kthreads except the
 * hotplug thread (current) and the stopper thread (inactive) have been
 * either parked or have been unbound from the outgoing CPU. Ensure that
 * any of those which might be on the way out are gone.
 *
 * If after this point a bound task is being woken on this CPU then the
 * responsible hotplug callback has failed to do it's job.
 * sched_cpu_dying() will catch it with the appropriate fireworks.
 */
int sched_cpu_wait_empty(unsigned int cpu)
{
	balance_hotplug_wait();
	return 0;
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
 * stable. We need to take the tear-down thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq, 1);

	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}
static void dump_rq_tasks(struct rq *rq, const char *loglvl)
{
	struct task_struct *g, *p;
	int cpu = cpu_of(rq);

	lockdep_assert_rq_held(rq);

	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
	for_each_process_thread(g, p) {
		if (task_cpu(p) != cpu)
			continue;

		if (!task_on_rq_queued(p))
			continue;

		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
	}
}

int sched_cpu_dying(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	/* Handle pending wakeups and then migrate everything off */
	sched_tick_stop(cpu);

	rq_lock_irqsave(rq, &rf);
	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
		WARN(true, "Dying CPU not properly vacated!");
		dump_rq_tasks(rq, KERN_WARNING);
	}
	rq_unlock_irqrestore(rq, &rf);

	calc_load_migrate(rq);
	update_max_interval();
	hrtick_clear(rq);
	sched_core_cpu_dying(cpu);
	return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
void __init sched_init_smp(void)
{
	sched_init_numa(NUMA_NO_NODE);

	/*
	 * There's no userspace yet to cause hotplug operations; hence all the
	 * CPU masks are stable and all blatant races in the below code cannot
	 * happen.
	 */
	mutex_lock(&sched_domains_mutex);
	sched_init_domains(cpu_active_mask);
	mutex_unlock(&sched_domains_mutex);

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
		BUG();
	current->flags &= ~PF_NO_SETAFFINITY;
	sched_init_granularity();

	init_sched_rt_class();
	init_sched_dl_class();

	sched_smp_initialized = true;
}

static int __init migration_init(void)
{
	sched_cpu_starting(smp_processor_id());
	return 0;
}
early_initcall(migration_init);

#else /* !CONFIG_SMP: */
void __init sched_init_smp(void)
{
	sched_init_granularity();
}
#endif /* CONFIG_SMP */
int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}

#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in system belongs to this group at bootup.
 */
struct task_group root_task_group;
LIST_HEAD(task_groups);

/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __ro_after_init;
#endif
void __init sched_init(void)
{
	unsigned long ptr = 0;
	int i;

	/* Make sure the linker didn't screw up */
#ifdef CONFIG_SMP
	BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
#endif
	BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
	BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
	BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
#ifdef CONFIG_SCHED_CLASS_EXT
	BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
	BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif

	wait_bit_init();

#ifdef CONFIG_FAIR_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
	if (ptr) {
		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_EXT_GROUP_SCHED
		root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
#endif /* CONFIG_EXT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
	}

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
	task_group_cache = KMEM_CACHE(task_group, 0);

	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
	INIT_LIST_HEAD(&root_task_group.siblings);
	autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

	for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(i);
		raw_spin_lock_init(&rq->__lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs);
		init_rt_rq(&rq->rt);
		init_dl_rq(&rq->dl);
#ifdef CONFIG_FAIR_GROUP_SCHED
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		/*
		 * How much CPU bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed through the cgroup filesystem, it
		 * gets 100% of the CPU resources in the system. This overall
		 * system CPU resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the CPU resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
		 */
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * This is required for init cpu because rt.c:__enable_runtime()
		 * starts working after scheduler_running, which is not the case
		 * in early booting.
		 */
		rq->rt.rt_runtime = global_rt_runtime();
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
		rq->balance_callback = &balance_push_callback;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;

		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
		rq->last_blocked_load_update_tick = jiffies;
		atomic_set(&rq->nohz_flags, 0);

		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
#endif
#ifdef CONFIG_HOTPLUG_CPU
		rcuwait_init(&rq->hotplug_wait);
#endif
#endif /* CONFIG_SMP */
		hrtick_rq_init(rq);
		atomic_set(&rq->nr_iowait, 0);
		fair_server_init(rq);

#ifdef CONFIG_SCHED_CORE
		rq->core = rq;
		rq->core_pick = NULL;
		rq->core_dl_server = NULL;
		rq->core_enabled = 0;
		rq->core_tree = RB_ROOT;
		rq->core_forceidle_count = 0;
		rq->core_forceidle_occupation = 0;
		rq->core_forceidle_start = 0;

		rq->core_cookie = 0UL;
#endif
		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
	}

	set_load_weight(&init_task, false);
	init_task.se.slice = sysctl_sched_base_slice,
	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	mmgrab_lazy_tlb(&init_mm);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * The idle task doesn't need the kthread struct to function, but it
	 * is dressed up as a per-CPU kthread and thus needs to play the part
	 * if we want to avoid special-casing it in code that deals with per-CPU
	 * kthreads.
	 */
	WARN_ON(!set_kthread_struct(current));

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	__sched_fork(0, current);
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

#ifdef CONFIG_SMP
	idle_thread_set_boot_cpu();
	balance_push_set(smp_processor_id(), false);
#endif
	init_sched_fair_class();
	init_sched_ext_class();

	psi_init();

	init_uclamp();

	preempt_dynamic_init();

	scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

void __might_sleep(const char *file, int line)
{
	unsigned int state = get_current_state();
	/*
	 * Blocking primitives will set (and therefore destroy) current->state,
	 * since we will exit with TASK_RUNNING make sure we enter with it,
	 * otherwise we will destroy state.
	 */
	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
			"do not call blocking ops when !TASK_RUNNING; "
			"state=%x set at [<%p>] %pS\n", state,
			(void *)current->task_state_change,
			(void *)current->task_state_change);

	__might_resched(file, line, 0);
}
EXPORT_SYMBOL(__might_sleep);
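/*
 * Illustrative sketch (not part of this file): might_sleep(), which expands
 * to roughly __might_sleep(__FILE__, __LINE__) on CONFIG_DEBUG_ATOMIC_SLEEP
 * kernels, is placed at the top of functions that may block so that misuse
 * from atomic context is caught even on call paths that happen not to block:
 *
 *	void my_alloc_helper(void)		// hypothetical function
 *	{
 *		might_sleep();
 *		...				// may call into the allocator
 *	}
 *
 * Calling it with a spinlock held or IRQs disabled triggers the
 * "sleeping function called from invalid context" report below.
 */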
static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		return;

	if (preempt_count() == preempt_offset)
		return;

	pr_err("Preemption disabled at:");
	print_ip_sym(KERN_ERR, ip);
}

static inline bool resched_offsets_ok(unsigned int offsets)
{
	unsigned int nested = preempt_count();

	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;

	return nested == offsets;
}
void __might_resched(const char *file, int line, unsigned int offsets)
{
	/* Ratelimiting timestamp: */
	static unsigned long prev_jiffy;

	unsigned long preempt_disable_ip;

	/* WARN_ON_ONCE() by default, no rate limit required: */
	rcu_sleep_check();

	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
	     !is_idle_task(current) && !current->non_block_count) ||
	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
	    oops_in_progress)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	/* Save this before calling printk(), since that will clobber it: */
	preempt_disable_ip = get_preempt_disable_ip(current);

	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
	       file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), current->non_block_count,
	       current->pid, current->comm);
	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
	       offsets & MIGHT_RESCHED_PREEMPT_MASK);

	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
		pr_err("RCU nest depth: %d, expected: %u\n",
		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
	}

	if (task_stack_end_corrupted(current))
		pr_emerg("Thread overran stack, or stack corrupted\n");

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);

	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
				 preempt_disable_ip);

	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL(__might_resched);
void __cant_sleep(const char *file, int line, int preempt_offset)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > preempt_offset)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(),
			current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);
#ifdef CONFIG_SMP
void __cant_migrate(const char *file, int line)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (is_migration_disabled(current))
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > 0)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
	       current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_ATOMIC_SLEEP */
#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
		schedstat_set(p->stats.wait_start,  0);
		schedstat_set(p->stats.sleep_start, 0);
		schedstat_set(p->stats.block_start, 0);

		if (!rt_or_dl_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		__sched_setscheduler(p, &attr, false, false);
	}
	read_unlock(&tasklist_lock);
}

#endif /* CONFIG_MAGIC_SYSRQ */
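/*
 * Illustrative sketch (not part of this file): normalize_rt_tasks() is the
 * handler behind the 'n' magic SysRq key ("nice all RT tasks"), so an
 * administrator can trigger it from user space with, for example:
 *
 *	echo n > /proc/sysrq-trigger
 *
 * which demotes runaway SCHED_FIFO/SCHED_RR/SCHED_DEADLINE user tasks back
 * to SCHED_NORMAL.
 */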
#if defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for KDB.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given CPU.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
static inline void alloc_uclamp_sched_group(struct task_group *tg,
					    struct task_group *parent)
{
#ifdef CONFIG_UCLAMP_TASK_GROUP
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&tg->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
	}
#endif
}
static void sched_free_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kmem_cache_free(task_group_cache, tg);
}

static void sched_free_group_rcu(struct rcu_head *rcu)
{
	sched_free_group(container_of(rcu, struct task_group, rcu));
}

static void sched_unregister_group(struct task_group *tg)
{
	unregister_fair_sched_group(tg);
	unregister_rt_sched_group(tg);
	/*
	 * We have to wait for yet another RCU grace period to expire, as
	 * print_cfs_stats() might run concurrently.
	 */
	call_rcu(&tg->rcu, sched_free_group_rcu);
}
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
	alloc_uclamp_sched_group(tg, parent);

	return tg;

err:
	sched_free_group(tg);
	return ERR_PTR(-ENOMEM);
}
void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	/* Root should already exist: */
	WARN_ON(!parent);

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);

	online_fair_sched_group(tg);
}
8974 /* RCU callback to free various structures associated with a task group */
8975 static void sched_unregister_group_rcu(struct rcu_head
*rhp
)
8977 /* Now it should be safe to free those cfs_rqs: */
8978 sched_unregister_group(container_of(rhp
, struct task_group
, rcu
));
8981 void sched_destroy_group(struct task_group
*tg
)
8983 /* Wait for possible concurrent references to cfs_rqs complete: */
8984 call_rcu(&tg
->rcu
, sched_unregister_group_rcu
);
void sched_release_group(struct task_group *tg)
{
	unsigned long flags;

	/*
	 * Unlink first, to avoid walk_tg_tree_from() finding us (via
	 * sched_cfs_period_timer()).
	 *
	 * For this to be effective, we have to wait for all pending users of
	 * this task group to leave their RCU critical section to ensure no new
	 * user will see our dying task group any more. Specifically ensure
	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
	 *
	 * We therefore defer calling unregister_fair_sched_group() to
	 * sched_unregister_group(), which is guaranteed to get called only
	 * after the current RCU grace period has expired.
	 */
	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}
9010 static struct task_group
*sched_get_task_group(struct task_struct
*tsk
)
9012 struct task_group
*tg
;
9015 * All callers are synchronized by task_rq_lock(); we do not use RCU
9016 * which is pointless here. Thus, we pass "true" to task_css_check()
9017 * to prevent lockdep warnings.
9019 tg
= container_of(task_css_check(tsk
, cpu_cgrp_id
, true),
9020 struct task_group
, css
);
9021 tg
= autogroup_task_group(tsk
, tg
);
9026 static void sched_change_group(struct task_struct
*tsk
, struct task_group
*group
)
9028 tsk
->sched_task_group
= group
;
9030 #ifdef CONFIG_FAIR_GROUP_SCHED
9031 if (tsk
->sched_class
->task_change_group
)
9032 tsk
->sched_class
->task_change_group(tsk
);
9035 set_task_rq(tsk
, task_cpu(tsk
));
/*
 * Change task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group by
 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
 * its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	int queued, running, queue_flags =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct task_group *group;

	CLASS(task_rq_lock, rq_guard)(tsk);
	struct rq *rq = rq_guard.rq;

	/*
	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
	 * group changes.
	 */
	group = sched_get_task_group(tsk);
	if (group == tsk->sched_task_group)
		return;

	update_rq_clock(rq);

	running = task_current_donor(rq, tsk);
	queued = task_on_rq_queued(tsk);

	if (queued)
		dequeue_task(rq, tsk, queue_flags);
	if (running)
		put_prev_task(rq, tsk);

	sched_change_group(tsk, group);
	scx_move_task(tsk);

	if (queued)
		enqueue_task(rq, tsk, queue_flags);
	if (running) {
		set_next_task(rq, tsk);
		/*
		 * After changing group, the running task may have joined a
		 * throttled one but it's still the running task. Trigger a
		 * resched to make sure that task can still run.
		 */
		resched_curr(rq);
	}
}
9089 static struct cgroup_subsys_state
*
9090 cpu_cgroup_css_alloc(struct cgroup_subsys_state
*parent_css
)
9092 struct task_group
*parent
= css_tg(parent_css
);
9093 struct task_group
*tg
;
9096 /* This is early initialization for the top cgroup */
9097 return &root_task_group
.css
;
9100 tg
= sched_create_group(parent
);
9102 return ERR_PTR(-ENOMEM
);
9107 /* Expose task group only after completing cgroup initialization */
9108 static int cpu_cgroup_css_online(struct cgroup_subsys_state
*css
)
9110 struct task_group
*tg
= css_tg(css
);
9111 struct task_group
*parent
= css_tg(css
->parent
);
9114 ret
= scx_tg_online(tg
);
9119 sched_online_group(tg
, parent
);
9121 #ifdef CONFIG_UCLAMP_TASK_GROUP
9122 /* Propagate the effective uclamp value for the new group */
9123 guard(mutex
)(&uclamp_mutex
);
9125 cpu_util_update_eff(css
);
9131 static void cpu_cgroup_css_offline(struct cgroup_subsys_state
*css
)
9133 struct task_group
*tg
= css_tg(css
);
9138 static void cpu_cgroup_css_released(struct cgroup_subsys_state
*css
)
9140 struct task_group
*tg
= css_tg(css
);
9142 sched_release_group(tg
);
9145 static void cpu_cgroup_css_free(struct cgroup_subsys_state
*css
)
9147 struct task_group
*tg
= css_tg(css
);
9150 * Relies on the RCU grace period between css_released() and this.
9152 sched_unregister_group(tg
);
9155 static int cpu_cgroup_can_attach(struct cgroup_taskset
*tset
)
9157 #ifdef CONFIG_RT_GROUP_SCHED
9158 struct task_struct
*task
;
9159 struct cgroup_subsys_state
*css
;
9161 cgroup_taskset_for_each(task
, css
, tset
) {
9162 if (!sched_rt_can_attach(css_tg(css
), task
))
9166 return scx_cgroup_can_attach(tset
);
9169 static void cpu_cgroup_attach(struct cgroup_taskset
*tset
)
9171 struct task_struct
*task
;
9172 struct cgroup_subsys_state
*css
;
9174 cgroup_taskset_for_each(task
, css
, tset
)
9175 sched_move_task(task
);
9177 scx_cgroup_finish_attach();
9180 static void cpu_cgroup_cancel_attach(struct cgroup_taskset
*tset
)
9182 scx_cgroup_cancel_attach(tset
);
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void cpu_util_update_eff(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *top_css = css;
	struct uclamp_se *uc_parent = NULL;
	struct uclamp_se *uc_se = NULL;
	unsigned int eff[UCLAMP_CNT];
	enum uclamp_id clamp_id;
	unsigned int clamps;

	lockdep_assert_held(&uclamp_mutex);
	SCHED_WARN_ON(!rcu_read_lock_held());

	css_for_each_descendant_pre(css, top_css) {
		uc_parent = css_tg(css)->parent
			? css_tg(css)->parent->uclamp : NULL;

		for_each_clamp_id(clamp_id) {
			/* Assume effective clamps matches requested clamps */
			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
			/* Cap effective clamps with parent's effective clamps */
			if (uc_parent &&
			    eff[clamp_id] > uc_parent[clamp_id].value) {
				eff[clamp_id] = uc_parent[clamp_id].value;
			}
		}
		/* Ensure protection is always capped by limit */
		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);

		/* Propagate most restrictive effective clamps */
		clamps = 0x0;
		uc_se = css_tg(css)->uclamp;
		for_each_clamp_id(clamp_id) {
			if (eff[clamp_id] == uc_se[clamp_id].value)
				continue;
			uc_se[clamp_id].value = eff[clamp_id];
			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
			clamps |= (0x1 << clamp_id);
		}
		if (!clamps) {
			css = css_rightmost_descendant(css);
			continue;
		}

		/* Immediately update descendants RUNNABLE tasks */
		uclamp_update_active_tasks(css);
	}
}
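
/*
 * Editorial sketch, not part of the scheduler proper: per clamp index, the
 * propagation rule implemented by cpu_util_update_eff() above reduces to
 * "own request capped by the parent's effective value", with the effective
 * minimum further capped by the effective maximum. The helper below only
 * restates that arithmetic for illustration; its name and signature are
 * assumptions made for this example.
 */
static inline void __maybe_unused
uclamp_eff_example(const unsigned int req[UCLAMP_CNT],
		   const unsigned int parent_eff[UCLAMP_CNT],
		   unsigned int eff[UCLAMP_CNT])
{
	enum uclamp_id clamp_id;

	/* Cap each requested clamp by the parent's effective clamp. */
	for_each_clamp_id(clamp_id)
		eff[clamp_id] = min(req[clamp_id], parent_eff[clamp_id]);

	/* A protection (uclamp.min) can never exceed the limit (uclamp.max). */
	eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
}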
/*
 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
 * C expression. Since there is no way to convert a macro argument (N) into a
 * character constant, use two levels of macros.
 */
#define _POW10(exp) ((unsigned int)1e##exp)
#define POW10(exp) _POW10(exp)

struct uclamp_request {
#define UCLAMP_PERCENT_SHIFT	2
#define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
	s64 percent;
	u64 util;
	int ret;
};

static inline struct uclamp_request
capacity_from_percent(char *buf)
{
	struct uclamp_request req = {
		.percent = UCLAMP_PERCENT_SCALE,
		.util = SCHED_CAPACITY_SCALE,
		.ret = 0,
	};

	buf = strim(buf);
	if (strcmp(buf, "max")) {
		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
					     &req.percent);
		if (req.ret)
			return req;
		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
			req.ret = -ERANGE;
			return req;
		}

		req.util = req.percent << SCHED_CAPACITY_SHIFT;
		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
	}

	return req;
}
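
/*
 * Editorial sketch, not part of the scheduler proper: how a percentage string
 * written to cpu.uclamp.{min,max} becomes a utilization value. With
 * UCLAMP_PERCENT_SHIFT == 2, cgroup_parse_float() turns "12.34" into 1234,
 * and the scaling below mirrors capacity_from_percent(): 1234 * 1024 / 10000
 * rounds to 126. The helper name and the sample value are assumptions made
 * for this example.
 */
static inline u64 __maybe_unused uclamp_percent_to_util_example(s64 percent)
{
	u64 util = (u64)percent << SCHED_CAPACITY_SHIFT;

	return DIV_ROUND_CLOSEST_ULL(util, UCLAMP_PERCENT_SCALE);
}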
static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off,
				enum uclamp_id clamp_id)
{
	struct uclamp_request req;
	struct task_group *tg;

	req = capacity_from_percent(buf);
	if (req.ret)
		return req.ret;

	static_branch_enable(&sched_uclamp_used);

	guard(mutex)(&uclamp_mutex);
	guard(rcu)();

	tg = css_tg(of_css(of));
	if (tg->uclamp_req[clamp_id].value != req.util)
		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);

	/*
	 * Because the conversion rounding is not recoverable, keep track of
	 * the exact requested value:
	 */
	tg->uclamp_pct[clamp_id] = req.percent;

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update_eff(of_css(of));

	return nbytes;
}
9309 static ssize_t
cpu_uclamp_min_write(struct kernfs_open_file
*of
,
9310 char *buf
, size_t nbytes
,
9313 return cpu_uclamp_write(of
, buf
, nbytes
, off
, UCLAMP_MIN
);
9316 static ssize_t
cpu_uclamp_max_write(struct kernfs_open_file
*of
,
9317 char *buf
, size_t nbytes
,
9320 return cpu_uclamp_write(of
, buf
, nbytes
, off
, UCLAMP_MAX
);
9323 static inline void cpu_uclamp_print(struct seq_file
*sf
,
9324 enum uclamp_id clamp_id
)
9326 struct task_group
*tg
;
9331 scoped_guard (rcu
) {
9332 tg
= css_tg(seq_css(sf
));
9333 util_clamp
= tg
->uclamp_req
[clamp_id
].value
;
9336 if (util_clamp
== SCHED_CAPACITY_SCALE
) {
9337 seq_puts(sf
, "max\n");
9341 percent
= tg
->uclamp_pct
[clamp_id
];
9342 percent
= div_u64_rem(percent
, POW10(UCLAMP_PERCENT_SHIFT
), &rem
);
9343 seq_printf(sf
, "%llu.%0*u\n", percent
, UCLAMP_PERCENT_SHIFT
, rem
);
9346 static int cpu_uclamp_min_show(struct seq_file
*sf
, void *v
)
9348 cpu_uclamp_print(sf
, UCLAMP_MIN
);
9352 static int cpu_uclamp_max_show(struct seq_file
*sf
, void *v
)
9354 cpu_uclamp_print(sf
, UCLAMP_MAX
);
9357 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9359 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9360 static unsigned long tg_weight(struct task_group
*tg
)
9362 #ifdef CONFIG_FAIR_GROUP_SCHED
9363 return scale_load_down(tg
->shares
);
9365 return sched_weight_from_cgroup(tg
->scx_weight
);
9369 static int cpu_shares_write_u64(struct cgroup_subsys_state
*css
,
9370 struct cftype
*cftype
, u64 shareval
)
9374 if (shareval
> scale_load_down(ULONG_MAX
))
9375 shareval
= MAX_SHARES
;
9376 ret
= sched_group_set_shares(css_tg(css
), scale_load(shareval
));
9378 scx_group_set_weight(css_tg(css
),
9379 sched_weight_to_cgroup(shareval
));
9383 static u64
cpu_shares_read_u64(struct cgroup_subsys_state
*css
,
9386 return tg_weight(css_tg(css
));
9388 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9390 #ifdef CONFIG_CFS_BANDWIDTH
9391 static DEFINE_MUTEX(cfs_constraints_mutex
);
9393 const u64 max_cfs_quota_period
= 1 * NSEC_PER_SEC
; /* 1s */
9394 static const u64 min_cfs_quota_period
= 1 * NSEC_PER_MSEC
; /* 1ms */
9395 /* More than 203 days if BW_SHIFT equals 20. */
9396 static const u64 max_cfs_runtime
= MAX_BW
* NSEC_PER_USEC
;
9398 static int __cfs_schedulable(struct task_group
*tg
, u64 period
, u64 runtime
);
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
				u64 burst)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have at least some amount of bandwidth every period. This
	 * is to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Bound quota to defend quota against overflow during bandwidth shift.
	 */
	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
		return -EINVAL;

	if (quota != RUNTIME_INF && (burst > quota ||
				     burst + quota > max_cfs_runtime))
		return -EINVAL;

	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	guard(cpus_read_lock)();
	guard(mutex)(&cfs_constraints_mutex);

	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		return ret;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();

	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
		cfs_b->period = ns_to_ktime(period);
		cfs_b->quota = quota;
		cfs_b->burst = burst;

		__refill_cfs_bandwidth_runtime(cfs_b);

		/*
		 * Restart the period timer (if active) to handle new
		 * period expiry:
		 */
		if (runtime_enabled)
			start_cfs_bandwidth(cfs_b);
	}

	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;

		guard(rq_lock_irq)(rq);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
	}

	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();

	return 0;
}
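
/*
 * Editorial sketch, not part of the scheduler proper: the semantics boil down
 * to "quota nanoseconds of runtime per period nanoseconds". A group that
 * should be allowed N full CPUs worth of runtime per period therefore needs
 * quota = N * period; e.g. quota = 2 * 100ms caps the hierarchy at two CPUs
 * per 100ms period. The helper below is hypothetical and only spells out that
 * multiplication.
 */
static inline u64 __maybe_unused cfs_quota_for_cpus_example(u64 period_ns,
							    unsigned int nr_cpus)
{
	return period_ns * nr_cpus;
}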
9488 static int tg_set_cfs_quota(struct task_group
*tg
, long cfs_quota_us
)
9490 u64 quota
, period
, burst
;
9492 period
= ktime_to_ns(tg
->cfs_bandwidth
.period
);
9493 burst
= tg
->cfs_bandwidth
.burst
;
9494 if (cfs_quota_us
< 0)
9495 quota
= RUNTIME_INF
;
9496 else if ((u64
)cfs_quota_us
<= U64_MAX
/ NSEC_PER_USEC
)
9497 quota
= (u64
)cfs_quota_us
* NSEC_PER_USEC
;
9501 return tg_set_cfs_bandwidth(tg
, period
, quota
, burst
);
9504 static long tg_get_cfs_quota(struct task_group
*tg
)
9508 if (tg
->cfs_bandwidth
.quota
== RUNTIME_INF
)
9511 quota_us
= tg
->cfs_bandwidth
.quota
;
9512 do_div(quota_us
, NSEC_PER_USEC
);
9517 static int tg_set_cfs_period(struct task_group
*tg
, long cfs_period_us
)
9519 u64 quota
, period
, burst
;
9521 if ((u64
)cfs_period_us
> U64_MAX
/ NSEC_PER_USEC
)
9524 period
= (u64
)cfs_period_us
* NSEC_PER_USEC
;
9525 quota
= tg
->cfs_bandwidth
.quota
;
9526 burst
= tg
->cfs_bandwidth
.burst
;
9528 return tg_set_cfs_bandwidth(tg
, period
, quota
, burst
);
9531 static long tg_get_cfs_period(struct task_group
*tg
)
9535 cfs_period_us
= ktime_to_ns(tg
->cfs_bandwidth
.period
);
9536 do_div(cfs_period_us
, NSEC_PER_USEC
);
9538 return cfs_period_us
;
9541 static int tg_set_cfs_burst(struct task_group
*tg
, long cfs_burst_us
)
9543 u64 quota
, period
, burst
;
9545 if ((u64
)cfs_burst_us
> U64_MAX
/ NSEC_PER_USEC
)
9548 burst
= (u64
)cfs_burst_us
* NSEC_PER_USEC
;
9549 period
= ktime_to_ns(tg
->cfs_bandwidth
.period
);
9550 quota
= tg
->cfs_bandwidth
.quota
;
9552 return tg_set_cfs_bandwidth(tg
, period
, quota
, burst
);
9555 static long tg_get_cfs_burst(struct task_group
*tg
)
9559 burst_us
= tg
->cfs_bandwidth
.burst
;
9560 do_div(burst_us
, NSEC_PER_USEC
);
9565 static s64
cpu_cfs_quota_read_s64(struct cgroup_subsys_state
*css
,
9568 return tg_get_cfs_quota(css_tg(css
));
9571 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state
*css
,
9572 struct cftype
*cftype
, s64 cfs_quota_us
)
9574 return tg_set_cfs_quota(css_tg(css
), cfs_quota_us
);
9577 static u64
cpu_cfs_period_read_u64(struct cgroup_subsys_state
*css
,
9580 return tg_get_cfs_period(css_tg(css
));
9583 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state
*css
,
9584 struct cftype
*cftype
, u64 cfs_period_us
)
9586 return tg_set_cfs_period(css_tg(css
), cfs_period_us
);
9589 static u64
cpu_cfs_burst_read_u64(struct cgroup_subsys_state
*css
,
9592 return tg_get_cfs_burst(css_tg(css
));
9595 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state
*css
,
9596 struct cftype
*cftype
, u64 cfs_burst_us
)
9598 return tg_set_cfs_burst(css_tg(css
), cfs_burst_us
);
9601 struct cfs_schedulable_data
{
9602 struct task_group
*tg
;
9607 * normalize group quota/period to be quota/max_period
9608 * note: units are usecs
9610 static u64
normalize_cfs_quota(struct task_group
*tg
,
9611 struct cfs_schedulable_data
*d
)
9619 period
= tg_get_cfs_period(tg
);
9620 quota
= tg_get_cfs_quota(tg
);
9623 /* note: these should typically be equivalent */
9624 if (quota
== RUNTIME_INF
|| quota
== -1)
9627 return to_ratio(period
, quota
);
9630 static int tg_cfs_schedulable_down(struct task_group
*tg
, void *data
)
9632 struct cfs_schedulable_data
*d
= data
;
9633 struct cfs_bandwidth
*cfs_b
= &tg
->cfs_bandwidth
;
9634 s64 quota
= 0, parent_quota
= -1;
9637 quota
= RUNTIME_INF
;
9639 struct cfs_bandwidth
*parent_b
= &tg
->parent
->cfs_bandwidth
;
9641 quota
= normalize_cfs_quota(tg
, d
);
9642 parent_quota
= parent_b
->hierarchical_quota
;
9645 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9646 * always take the non-RUNTIME_INF min. On cgroup1, only
9647 * inherit when no limit is set. In both cases this is used
9648 * by the scheduler to determine if a given CFS task has a
9649 * bandwidth constraint at some higher level.
9651 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys
)) {
9652 if (quota
== RUNTIME_INF
)
9653 quota
= parent_quota
;
9654 else if (parent_quota
!= RUNTIME_INF
)
9655 quota
= min(quota
, parent_quota
);
9657 if (quota
== RUNTIME_INF
)
9658 quota
= parent_quota
;
9659 else if (parent_quota
!= RUNTIME_INF
&& quota
> parent_quota
)
9663 cfs_b
->hierarchical_quota
= quota
;
9668 static int __cfs_schedulable(struct task_group
*tg
, u64 period
, u64 quota
)
9670 struct cfs_schedulable_data data
= {
9676 if (quota
!= RUNTIME_INF
) {
9677 do_div(data
.period
, NSEC_PER_USEC
);
9678 do_div(data
.quota
, NSEC_PER_USEC
);
9682 return walk_tg_tree(tg_cfs_schedulable_down
, tg_nop
, &data
);
9685 static int cpu_cfs_stat_show(struct seq_file
*sf
, void *v
)
9687 struct task_group
*tg
= css_tg(seq_css(sf
));
9688 struct cfs_bandwidth
*cfs_b
= &tg
->cfs_bandwidth
;
9690 seq_printf(sf
, "nr_periods %d\n", cfs_b
->nr_periods
);
9691 seq_printf(sf
, "nr_throttled %d\n", cfs_b
->nr_throttled
);
9692 seq_printf(sf
, "throttled_time %llu\n", cfs_b
->throttled_time
);
9694 if (schedstat_enabled() && tg
!= &root_task_group
) {
9695 struct sched_statistics
*stats
;
9699 for_each_possible_cpu(i
) {
9700 stats
= __schedstats_from_se(tg
->se
[i
]);
9701 ws
+= schedstat_val(stats
->wait_sum
);
9704 seq_printf(sf
, "wait_sum %llu\n", ws
);
9707 seq_printf(sf
, "nr_bursts %d\n", cfs_b
->nr_burst
);
9708 seq_printf(sf
, "burst_time %llu\n", cfs_b
->burst_time
);
9713 static u64
throttled_time_self(struct task_group
*tg
)
9718 for_each_possible_cpu(i
) {
9719 total
+= READ_ONCE(tg
->cfs_rq
[i
]->throttled_clock_self_time
);
9725 static int cpu_cfs_local_stat_show(struct seq_file
*sf
, void *v
)
9727 struct task_group
*tg
= css_tg(seq_css(sf
));
9729 seq_printf(sf
, "throttled_time %llu\n", throttled_time_self(tg
));
9733 #endif /* CONFIG_CFS_BANDWIDTH */
9735 #ifdef CONFIG_RT_GROUP_SCHED
9736 static int cpu_rt_runtime_write(struct cgroup_subsys_state
*css
,
9737 struct cftype
*cft
, s64 val
)
9739 return sched_group_set_rt_runtime(css_tg(css
), val
);
9742 static s64
cpu_rt_runtime_read(struct cgroup_subsys_state
*css
,
9745 return sched_group_rt_runtime(css_tg(css
));
9748 static int cpu_rt_period_write_uint(struct cgroup_subsys_state
*css
,
9749 struct cftype
*cftype
, u64 rt_period_us
)
9751 return sched_group_set_rt_period(css_tg(css
), rt_period_us
);
9754 static u64
cpu_rt_period_read_uint(struct cgroup_subsys_state
*css
,
9757 return sched_group_rt_period(css_tg(css
));
9759 #endif /* CONFIG_RT_GROUP_SCHED */
9761 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9762 static s64
cpu_idle_read_s64(struct cgroup_subsys_state
*css
,
9765 return css_tg(css
)->idle
;
9768 static int cpu_idle_write_s64(struct cgroup_subsys_state
*css
,
9769 struct cftype
*cft
, s64 idle
)
9773 ret
= sched_group_set_idle(css_tg(css
), idle
);
9775 scx_group_set_idle(css_tg(css
), idle
);
9780 static struct cftype cpu_legacy_files
[] = {
9781 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9784 .read_u64
= cpu_shares_read_u64
,
9785 .write_u64
= cpu_shares_write_u64
,
9789 .read_s64
= cpu_idle_read_s64
,
9790 .write_s64
= cpu_idle_write_s64
,
9793 #ifdef CONFIG_CFS_BANDWIDTH
9795 .name
= "cfs_quota_us",
9796 .read_s64
= cpu_cfs_quota_read_s64
,
9797 .write_s64
= cpu_cfs_quota_write_s64
,
9800 .name
= "cfs_period_us",
9801 .read_u64
= cpu_cfs_period_read_u64
,
9802 .write_u64
= cpu_cfs_period_write_u64
,
9805 .name
= "cfs_burst_us",
9806 .read_u64
= cpu_cfs_burst_read_u64
,
9807 .write_u64
= cpu_cfs_burst_write_u64
,
9811 .seq_show
= cpu_cfs_stat_show
,
9814 .name
= "stat.local",
9815 .seq_show
= cpu_cfs_local_stat_show
,
9818 #ifdef CONFIG_RT_GROUP_SCHED
9820 .name
= "rt_runtime_us",
9821 .read_s64
= cpu_rt_runtime_read
,
9822 .write_s64
= cpu_rt_runtime_write
,
9825 .name
= "rt_period_us",
9826 .read_u64
= cpu_rt_period_read_uint
,
9827 .write_u64
= cpu_rt_period_write_uint
,
9830 #ifdef CONFIG_UCLAMP_TASK_GROUP
9832 .name
= "uclamp.min",
9833 .flags
= CFTYPE_NOT_ON_ROOT
,
9834 .seq_show
= cpu_uclamp_min_show
,
9835 .write
= cpu_uclamp_min_write
,
9838 .name
= "uclamp.max",
9839 .flags
= CFTYPE_NOT_ON_ROOT
,
9840 .seq_show
= cpu_uclamp_max_show
,
9841 .write
= cpu_uclamp_max_write
,
9847 static int cpu_extra_stat_show(struct seq_file
*sf
,
9848 struct cgroup_subsys_state
*css
)
9850 #ifdef CONFIG_CFS_BANDWIDTH
9852 struct task_group
*tg
= css_tg(css
);
9853 struct cfs_bandwidth
*cfs_b
= &tg
->cfs_bandwidth
;
9854 u64 throttled_usec
, burst_usec
;
9856 throttled_usec
= cfs_b
->throttled_time
;
9857 do_div(throttled_usec
, NSEC_PER_USEC
);
9858 burst_usec
= cfs_b
->burst_time
;
9859 do_div(burst_usec
, NSEC_PER_USEC
);
9861 seq_printf(sf
, "nr_periods %d\n"
9863 "throttled_usec %llu\n"
9865 "burst_usec %llu\n",
9866 cfs_b
->nr_periods
, cfs_b
->nr_throttled
,
9867 throttled_usec
, cfs_b
->nr_burst
, burst_usec
);
9873 static int cpu_local_stat_show(struct seq_file
*sf
,
9874 struct cgroup_subsys_state
*css
)
9876 #ifdef CONFIG_CFS_BANDWIDTH
9878 struct task_group
*tg
= css_tg(css
);
9879 u64 throttled_self_usec
;
9881 throttled_self_usec
= throttled_time_self(tg
);
9882 do_div(throttled_self_usec
, NSEC_PER_USEC
);
9884 seq_printf(sf
, "throttled_usec %llu\n",
9885 throttled_self_usec
);
9891 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9893 static u64
cpu_weight_read_u64(struct cgroup_subsys_state
*css
,
9896 return sched_weight_to_cgroup(tg_weight(css_tg(css
)));
9899 static int cpu_weight_write_u64(struct cgroup_subsys_state
*css
,
9900 struct cftype
*cft
, u64 cgrp_weight
)
9902 unsigned long weight
;
9905 if (cgrp_weight
< CGROUP_WEIGHT_MIN
|| cgrp_weight
> CGROUP_WEIGHT_MAX
)
9908 weight
= sched_weight_from_cgroup(cgrp_weight
);
9910 ret
= sched_group_set_shares(css_tg(css
), scale_load(weight
));
9912 scx_group_set_weight(css_tg(css
), cgrp_weight
);
9916 static s64
cpu_weight_nice_read_s64(struct cgroup_subsys_state
*css
,
9919 unsigned long weight
= tg_weight(css_tg(css
));
9920 int last_delta
= INT_MAX
;
9923 /* find the closest nice value to the current weight */
9924 for (prio
= 0; prio
< ARRAY_SIZE(sched_prio_to_weight
); prio
++) {
9925 delta
= abs(sched_prio_to_weight
[prio
] - weight
);
9926 if (delta
>= last_delta
)
9931 return PRIO_TO_NICE(prio
- 1 + MAX_RT_PRIO
);
static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
				     struct cftype *cft, s64 nice)
{
	unsigned long weight;
	int idx, ret;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -ERANGE;

	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
	idx = array_index_nospec(idx, 40);
	weight = sched_prio_to_weight[idx];

	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
	if (!ret)
		scx_group_set_weight(css_tg(css),
				     sched_weight_to_cgroup(weight));
	return ret;
}
#endif /* CONFIG_GROUP_SCHED_WEIGHT */
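
/*
 * Editorial sketch, not part of the scheduler proper: cpu.weight.nice works
 * because nice 0 corresponds to weight 1024 and every nice step scales the
 * weight by roughly 1.25, which is what the sched_prio_to_weight[] table
 * further below encodes. The helper name is an assumption made for this
 * example; it simply indexes that table the same way the writer above does.
 */
static inline unsigned long __maybe_unused weight_of_nice_example(int nice)
{
	/* e.g. nice 0 -> 1024, nice -1 -> 1277, nice 1 -> 820 */
	return sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO];
}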
static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
						  long period, long quota)
{
	if (quota < 0)
		seq_puts(sf, "max");
	else
		seq_printf(sf, "%ld", quota);

	seq_printf(sf, " %ld\n", period);
}

/* caller should put the current value in *@periodp before calling */
static int __maybe_unused cpu_period_quota_parse(char *buf,
						 u64 *periodp, u64 *quotap)
{
	char tok[21];	/* U64_MAX */

	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
		return -EINVAL;

	*periodp *= NSEC_PER_USEC;

	if (sscanf(tok, "%llu", quotap))
		*quotap *= NSEC_PER_USEC;
	else if (!strcmp(tok, "max"))
		*quotap = RUNTIME_INF;
	else
		return -EINVAL;

	return 0;
}
#ifdef CONFIG_CFS_BANDWIDTH
static int cpu_max_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
	return 0;
}

static ssize_t cpu_max_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	struct task_group *tg = css_tg(of_css(of));
	u64 period = tg_get_cfs_period(tg);
	u64 burst = tg->cfs_bandwidth.burst;
	u64 quota;
	int ret;

	ret = cpu_period_quota_parse(buf, &period, &quota);
	if (!ret)
		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
	return ret ?: nbytes;
}
#endif /* CONFIG_CFS_BANDWIDTH */
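
/*
 * Editorial sketch, not part of the scheduler proper: cgroup2's "cpu.max"
 * accepts "<quota> <period>" or "max [<period>]", both in microseconds, so
 * writing "50000 100000" limits the group to half a CPU. The snippet below is
 * hypothetical and only shows the parse step cpu_max_write() performs on such
 * a buffer; the sample string is an assumption made for this example.
 */
static void __maybe_unused cpu_max_parse_example(void)
{
	u64 period = 100000, quota;
	char buf[] = "50000 100000";

	if (!cpu_period_quota_parse(buf, &period, &quota))
		pr_debug("quota=%llu ns, period=%llu ns\n", quota, period);
}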
10012 static struct cftype cpu_files
[] = {
10013 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10016 .flags
= CFTYPE_NOT_ON_ROOT
,
10017 .read_u64
= cpu_weight_read_u64
,
10018 .write_u64
= cpu_weight_write_u64
,
10021 .name
= "weight.nice",
10022 .flags
= CFTYPE_NOT_ON_ROOT
,
10023 .read_s64
= cpu_weight_nice_read_s64
,
10024 .write_s64
= cpu_weight_nice_write_s64
,
10028 .flags
= CFTYPE_NOT_ON_ROOT
,
10029 .read_s64
= cpu_idle_read_s64
,
10030 .write_s64
= cpu_idle_write_s64
,
10033 #ifdef CONFIG_CFS_BANDWIDTH
10036 .flags
= CFTYPE_NOT_ON_ROOT
,
10037 .seq_show
= cpu_max_show
,
10038 .write
= cpu_max_write
,
10041 .name
= "max.burst",
10042 .flags
= CFTYPE_NOT_ON_ROOT
,
10043 .read_u64
= cpu_cfs_burst_read_u64
,
10044 .write_u64
= cpu_cfs_burst_write_u64
,
10047 #ifdef CONFIG_UCLAMP_TASK_GROUP
10049 .name
= "uclamp.min",
10050 .flags
= CFTYPE_NOT_ON_ROOT
,
10051 .seq_show
= cpu_uclamp_min_show
,
10052 .write
= cpu_uclamp_min_write
,
10055 .name
= "uclamp.max",
10056 .flags
= CFTYPE_NOT_ON_ROOT
,
10057 .seq_show
= cpu_uclamp_max_show
,
10058 .write
= cpu_uclamp_max_write
,
10061 { } /* terminate */
10064 struct cgroup_subsys cpu_cgrp_subsys
= {
10065 .css_alloc
= cpu_cgroup_css_alloc
,
10066 .css_online
= cpu_cgroup_css_online
,
10067 .css_offline
= cpu_cgroup_css_offline
,
10068 .css_released
= cpu_cgroup_css_released
,
10069 .css_free
= cpu_cgroup_css_free
,
10070 .css_extra_stat_show
= cpu_extra_stat_show
,
10071 .css_local_stat_show
= cpu_local_stat_show
,
10072 .can_attach
= cpu_cgroup_can_attach
,
10073 .attach
= cpu_cgroup_attach
,
10074 .cancel_attach
= cpu_cgroup_cancel_attach
,
10075 .legacy_cftypes
= cpu_legacy_files
,
10076 .dfl_cftypes
= cpu_files
,
10077 .early_init
= true,
10081 #endif /* CONFIG_CGROUP_SCHED */
10083 void dump_cpu_task(int cpu
)
10085 if (in_hardirq() && cpu
== smp_processor_id()) {
10086 struct pt_regs
*regs
;
10088 regs
= get_irq_regs();
10095 if (trigger_single_cpu_backtrace(cpu
))
10098 pr_info("Task dump for CPU %d:\n", cpu
);
10099 sched_show_task(cpu_curr(cpu
));
/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
 *
 * In cases where the weight does not change often, we can use the
 * pre-calculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
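
/*
 * Editorial sketch, not part of the scheduler proper: with the pre-computed
 * inverses above, dividing by a task weight becomes a multiply followed by a
 * 32-bit shift. The helper below is hypothetical; e.g. for nice 0
 * (weight 1024, inverse 4194304) it computes delta * 4194304 >> 32, which
 * equals delta / 1024.
 */
static inline u64 __maybe_unused div_by_weight_example(u64 delta, int prio)
{
	return mul_u64_u32_shr(delta, sched_prio_to_wmult[prio], 32);
}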
10143 void call_trace_sched_update_nr_running(struct rq
*rq
, int count
)
10145 trace_sched_update_nr_running_tp(rq
, count
);
#ifdef CONFIG_SCHED_MM_CID

/*
 * @cid_lock: Guarantee forward-progress of cid allocation.
 *
 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
 * is only used when contention is detected by the lock-free allocation so
 * forward progress can be guaranteed.
 */
DEFINE_RAW_SPINLOCK(cid_lock);

/*
 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
 *
 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
 * detected, it is set to 1 to ensure that all newly coming allocations are
 * serialized by @cid_lock until the allocation which detected contention
 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
 * of a cid allocation.
 */
int use_cid_lock;

/*
 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
 * concurrently with respect to the execution of the source runqueue context
 * switch.
 *
 * There is one basic property we want to guarantee here:
 *
 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
 * used by a task. That would lead to concurrent allocation of the cid and
 * userspace corruption.
 *
 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
 * that a pair of loads observe at least one of a pair of stores, which can be
 * shown as:
 *
 *      X = Y = 0
 *
 *      w[X]=1          w[Y]=1
 *      MB              MB
 *      r[Y]=y          r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible. But rather than using
 * values 0 and 1, this algorithm cares about specific state transitions of the
 * runqueue current task (as updated by the scheduler context switch), and the
 * per-mm/cpu cid value.
 *
 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
 * task->mm != mm for the rest of the discussion. There are two scheduler state
 * transitions on context switch we care about:
 *
 * (TSA) Store to rq->curr with transition from (N) to (Y)
 *
 * (TSB) Store to rq->curr with transition from (Y) to (N)
 *
 * On the remote-clear side, there is one transition we care about:
 *
 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
 *
 * There is also a transition to UNSET state which can be performed from all
 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
 * guarantees that only a single thread will succeed:
 *
 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
 *
 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
 * when a thread is actively using the cid (property (1)).
 *
 * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
 *
 * Scenario A) (TSA)+(TMA) (from next task perspective)
 *
 * CPU0                                      CPU1
 *
 * Context switch CS-1                       Remote-clear
 *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_cid to LAZY (TMA)
 *                                             (implied barrier after cmpxchg)
 *   - switch_mm_cid()
 *     - memory barrier (see switch_mm_cid()
 *       comment explaining how this barrier
 *       is combined with other scheduler
 *       barriers)
 *     - mm_cid_get (next)
 *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
 *
 * This Dekker ensures that either task (Y) is observed by the
 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
 * observed.
 *
 * If task (Y) store is observed by rcu_dereference(), it means that there is
 * still an active task on the cpu. Remote-clear will therefore not transition
 * to UNSET, which fulfills property (1).
 *
 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
 * it will move its state to UNSET, which clears the percpu cid perhaps
 * uselessly (which is not an issue for correctness). Because task (Y) is not
 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
 * state to UNSET is done with a cmpxchg expecting that the old state has the
 * LAZY flag set, only one thread will successfully UNSET.
 *
 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
 * CPU1 will observe task (Y) and do nothing more, which is fine.
 *
 * What we are effectively preventing with this Dekker is a scenario where
 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
 * because this would UNSET a cid which is actively used.
 */
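
/*
 * Editorial sketch, not part of the scheduler proper: the store-buffering
 * pattern referenced above, written out with dummy variables. After both
 * sides have executed, at least one of the two loads returns 1; both
 * returning 0 is impossible. The variables and helpers are hypothetical and
 * kept under #if 0 so they are never built.
 */
#if 0
static int x, y;

static void dekker_cpu0_example(int *ry)
{
	WRITE_ONCE(x, 1);
	smp_mb();			/* pairs with smp_mb() on the other CPU */
	*ry = READ_ONCE(y);
}

static void dekker_cpu1_example(int *rx)
{
	WRITE_ONCE(y, 1);
	smp_mb();
	*rx = READ_ONCE(x);
}
#endif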
10258 void sched_mm_cid_migrate_from(struct task_struct
*t
)
10260 t
->migrate_from_cpu
= task_cpu(t
);
10264 int __sched_mm_cid_migrate_from_fetch_cid(struct rq
*src_rq
,
10265 struct task_struct
*t
,
10266 struct mm_cid
*src_pcpu_cid
)
10268 struct mm_struct
*mm
= t
->mm
;
10269 struct task_struct
*src_task
;
10270 int src_cid
, last_mm_cid
;
10275 last_mm_cid
= t
->last_mm_cid
;
10277 * If the migrated task has no last cid, or if the current
10278 * task on src rq uses the cid, it means the source cid does not need
10279 * to be moved to the destination cpu.
10281 if (last_mm_cid
== -1)
10283 src_cid
= READ_ONCE(src_pcpu_cid
->cid
);
10284 if (!mm_cid_is_valid(src_cid
) || last_mm_cid
!= src_cid
)
10288 * If we observe an active task using the mm on this rq, it means we
10289 * are not the last task to be migrated from this cpu for this mm, so
10290 * there is no need to move src_cid to the destination cpu.
10293 src_task
= rcu_dereference(src_rq
->curr
);
10294 if (READ_ONCE(src_task
->mm_cid_active
) && src_task
->mm
== mm
) {
10295 t
->last_mm_cid
= -1;
10303 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq
*src_rq
,
10304 struct task_struct
*t
,
10305 struct mm_cid
*src_pcpu_cid
,
10308 struct task_struct
*src_task
;
10309 struct mm_struct
*mm
= t
->mm
;
10316 * Attempt to clear the source cpu cid to move it to the destination
10319 lazy_cid
= mm_cid_set_lazy_put(src_cid
);
10320 if (!try_cmpxchg(&src_pcpu_cid
->cid
, &src_cid
, lazy_cid
))
10324 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10325 * rq->curr->mm matches the scheduler barrier in context_switch()
10326 * between store to rq->curr and load of prev and next task's
10329 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10330 * rq->curr->mm_cid_active matches the barrier in
10331 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10332 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10333 * load of per-mm/cpu cid.
10337 * If we observe an active task using the mm on this rq after setting
10338 * the lazy-put flag, this task will be responsible for transitioning
10339 * from lazy-put flag set to MM_CID_UNSET.
10341 scoped_guard (rcu
) {
10342 src_task
= rcu_dereference(src_rq
->curr
);
10343 if (READ_ONCE(src_task
->mm_cid_active
) && src_task
->mm
== mm
) {
10345 * We observed an active task for this mm, there is therefore
10346 * no point in moving this cid to the destination cpu.
10348 t
->last_mm_cid
= -1;
10354 * The src_cid is unused, so it can be unset.
10356 if (!try_cmpxchg(&src_pcpu_cid
->cid
, &lazy_cid
, MM_CID_UNSET
))
10358 WRITE_ONCE(src_pcpu_cid
->recent_cid
, MM_CID_UNSET
);
10363 * Migration to dst cpu. Called with dst_rq lock held.
10364 * Interrupts are disabled, which keeps the window of cid ownership without the
10365 * source rq lock held small.
10367 void sched_mm_cid_migrate_to(struct rq
*dst_rq
, struct task_struct
*t
)
10369 struct mm_cid
*src_pcpu_cid
, *dst_pcpu_cid
;
10370 struct mm_struct
*mm
= t
->mm
;
10371 int src_cid
, src_cpu
;
10372 bool dst_cid_is_set
;
10375 lockdep_assert_rq_held(dst_rq
);
10379 src_cpu
= t
->migrate_from_cpu
;
10380 if (src_cpu
== -1) {
10381 t
->last_mm_cid
= -1;
10385 * Move the src cid if the dst cid is unset. This keeps id
10386 * allocation closest to 0 in cases where few threads migrate around
10389 * If destination cid or recent cid is already set, we may have
10390 * to just clear the src cid to ensure compactness in frequent
10391 * migrations scenarios.
10393 * It is not useful to clear the src cid when the number of threads is
10394 * greater or equal to the number of allowed CPUs, because user-space
10395 * can expect that the number of allowed cids can reach the number of
10398 dst_pcpu_cid
= per_cpu_ptr(mm
->pcpu_cid
, cpu_of(dst_rq
));
10399 dst_cid_is_set
= !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid
->cid
)) ||
10400 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid
->recent_cid
));
10401 if (dst_cid_is_set
&& atomic_read(&mm
->mm_users
) >= READ_ONCE(mm
->nr_cpus_allowed
))
10403 src_pcpu_cid
= per_cpu_ptr(mm
->pcpu_cid
, src_cpu
);
10404 src_rq
= cpu_rq(src_cpu
);
10405 src_cid
= __sched_mm_cid_migrate_from_fetch_cid(src_rq
, t
, src_pcpu_cid
);
10408 src_cid
= __sched_mm_cid_migrate_from_try_steal_cid(src_rq
, t
, src_pcpu_cid
,
10412 if (dst_cid_is_set
) {
10413 __mm_cid_put(mm
, src_cid
);
10416 /* Move src_cid to dst cpu. */
10417 mm_cid_snapshot_time(dst_rq
, mm
);
10418 WRITE_ONCE(dst_pcpu_cid
->cid
, src_cid
);
10419 WRITE_ONCE(dst_pcpu_cid
->recent_cid
, src_cid
);
10422 static void sched_mm_cid_remote_clear(struct mm_struct
*mm
, struct mm_cid
*pcpu_cid
,
10425 struct rq
*rq
= cpu_rq(cpu
);
10426 struct task_struct
*t
;
10429 cid
= READ_ONCE(pcpu_cid
->cid
);
10430 if (!mm_cid_is_valid(cid
))
10434 * Clear the cpu cid if it is set to keep cid allocation compact. If
10435 * there happens to be other tasks left on the source cpu using this
10436 * mm, the next task using this mm will reallocate its cid on context
10439 lazy_cid
= mm_cid_set_lazy_put(cid
);
10440 if (!try_cmpxchg(&pcpu_cid
->cid
, &cid
, lazy_cid
))
10444 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10445 * rq->curr->mm matches the scheduler barrier in context_switch()
10446 * between store to rq->curr and load of prev and next task's
10449 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10450 * rq->curr->mm_cid_active matches the barrier in
10451 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10452 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10453 * load of per-mm/cpu cid.
10457 * If we observe an active task using the mm on this rq after setting
10458 * the lazy-put flag, that task will be responsible for transitioning
10459 * from lazy-put flag set to MM_CID_UNSET.
10461 scoped_guard (rcu
) {
10462 t
= rcu_dereference(rq
->curr
);
10463 if (READ_ONCE(t
->mm_cid_active
) && t
->mm
== mm
)
10468 * The cid is unused, so it can be unset.
10469 * Disable interrupts to keep the window of cid ownership without rq
10472 scoped_guard (irqsave
) {
10473 if (try_cmpxchg(&pcpu_cid
->cid
, &lazy_cid
, MM_CID_UNSET
))
10474 __mm_cid_put(mm
, cid
);
10478 static void sched_mm_cid_remote_clear_old(struct mm_struct
*mm
, int cpu
)
10480 struct rq
*rq
= cpu_rq(cpu
);
10481 struct mm_cid
*pcpu_cid
;
10482 struct task_struct
*curr
;
10486 * rq->clock load is racy on 32-bit but one spurious clear once in a
10487 * while is irrelevant.
10489 rq_clock
= READ_ONCE(rq
->clock
);
10490 pcpu_cid
= per_cpu_ptr(mm
->pcpu_cid
, cpu
);
10493 * In order to take care of infrequently scheduled tasks, bump the time
10494 * snapshot associated with this cid if an active task using the mm is
10495 * observed on this rq.
10497 scoped_guard (rcu
) {
10498 curr
= rcu_dereference(rq
->curr
);
10499 if (READ_ONCE(curr
->mm_cid_active
) && curr
->mm
== mm
) {
10500 WRITE_ONCE(pcpu_cid
->time
, rq_clock
);
10505 if (rq_clock
< pcpu_cid
->time
+ SCHED_MM_CID_PERIOD_NS
)
10507 sched_mm_cid_remote_clear(mm
, pcpu_cid
, cpu
);
10510 static void sched_mm_cid_remote_clear_weight(struct mm_struct
*mm
, int cpu
,
10513 struct mm_cid
*pcpu_cid
;
10516 pcpu_cid
= per_cpu_ptr(mm
->pcpu_cid
, cpu
);
10517 cid
= READ_ONCE(pcpu_cid
->cid
);
10518 if (!mm_cid_is_valid(cid
) || cid
< weight
)
10520 sched_mm_cid_remote_clear(mm
, pcpu_cid
, cpu
);
10523 static void task_mm_cid_work(struct callback_head
*work
)
10525 unsigned long now
= jiffies
, old_scan
, next_scan
;
10526 struct task_struct
*t
= current
;
10527 struct cpumask
*cidmask
;
10528 struct mm_struct
*mm
;
10531 SCHED_WARN_ON(t
!= container_of(work
, struct task_struct
, cid_work
));
10533 work
->next
= work
; /* Prevent double-add */
10534 if (t
->flags
& PF_EXITING
)
10539 old_scan
= READ_ONCE(mm
->mm_cid_next_scan
);
10540 next_scan
= now
+ msecs_to_jiffies(MM_CID_SCAN_DELAY
);
10544 res
= cmpxchg(&mm
->mm_cid_next_scan
, old_scan
, next_scan
);
10545 if (res
!= old_scan
)
10548 old_scan
= next_scan
;
10550 if (time_before(now
, old_scan
))
10552 if (!try_cmpxchg(&mm
->mm_cid_next_scan
, &old_scan
, next_scan
))
10554 cidmask
= mm_cidmask(mm
);
10555 /* Clear cids that were not recently used. */
10556 for_each_possible_cpu(cpu
)
10557 sched_mm_cid_remote_clear_old(mm
, cpu
);
10558 weight
= cpumask_weight(cidmask
);
10560 * Clear cids that are greater or equal to the cidmask weight to
10563 for_each_possible_cpu(cpu
)
10564 sched_mm_cid_remote_clear_weight(mm
, cpu
, weight
);
10567 void init_sched_mm_cid(struct task_struct
*t
)
10569 struct mm_struct
*mm
= t
->mm
;
10573 mm_users
= atomic_read(&mm
->mm_users
);
10575 mm
->mm_cid_next_scan
= jiffies
+ msecs_to_jiffies(MM_CID_SCAN_DELAY
);
10577 t
->cid_work
.next
= &t
->cid_work
; /* Protect against double add */
10578 init_task_work(&t
->cid_work
, task_mm_cid_work
);
10581 void task_tick_mm_cid(struct rq
*rq
, struct task_struct
*curr
)
10583 struct callback_head
*work
= &curr
->cid_work
;
10584 unsigned long now
= jiffies
;
10586 if (!curr
->mm
|| (curr
->flags
& (PF_EXITING
| PF_KTHREAD
)) ||
10587 work
->next
!= work
)
10589 if (time_before(now
, READ_ONCE(curr
->mm
->mm_cid_next_scan
)))
10592 /* No page allocation under rq lock */
10593 task_work_add(curr
, work
, TWA_RESUME
| TWAF_NO_ALLOC
);
10596 void sched_mm_cid_exit_signals(struct task_struct
*t
)
10598 struct mm_struct
*mm
= t
->mm
;
10606 guard(rq_lock_irqsave
)(rq
);
10607 preempt_enable_no_resched(); /* holding spinlock */
10608 WRITE_ONCE(t
->mm_cid_active
, 0);
10610 * Store t->mm_cid_active before loading per-mm/cpu cid.
10611 * Matches barrier in sched_mm_cid_remote_clear_old().
10615 t
->last_mm_cid
= t
->mm_cid
= -1;
10618 void sched_mm_cid_before_execve(struct task_struct
*t
)
10620 struct mm_struct
*mm
= t
->mm
;
10628 guard(rq_lock_irqsave
)(rq
);
10629 preempt_enable_no_resched(); /* holding spinlock */
10630 WRITE_ONCE(t
->mm_cid_active
, 0);
10632 * Store t->mm_cid_active before loading per-mm/cpu cid.
10633 * Matches barrier in sched_mm_cid_remote_clear_old().
10637 t
->last_mm_cid
= t
->mm_cid
= -1;
10640 void sched_mm_cid_after_execve(struct task_struct
*t
)
10642 struct mm_struct
*mm
= t
->mm
;
10650 scoped_guard (rq_lock_irqsave
, rq
) {
10651 preempt_enable_no_resched(); /* holding spinlock */
10652 WRITE_ONCE(t
->mm_cid_active
, 1);
10654 * Store t->mm_cid_active before loading per-mm/cpu cid.
10655 * Matches barrier in sched_mm_cid_remote_clear_old().
10658 t
->last_mm_cid
= t
->mm_cid
= mm_cid_get(rq
, t
, mm
);
10660 rseq_set_notify_resume(t
);
10663 void sched_mm_cid_fork(struct task_struct
*t
)
10665 WARN_ON_ONCE(!t
->mm
|| t
->mm_cid
!= -1);
10666 t
->mm_cid_active
= 1;
#ifdef CONFIG_SCHED_CLASS_EXT
void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
			    struct sched_enq_and_set_ctx *ctx)
{
	struct rq *rq = task_rq(p);

	lockdep_assert_rq_held(rq);

	*ctx = (struct sched_enq_and_set_ctx){
		.p = p,
		.queue_flags = queue_flags,
		.queued = task_on_rq_queued(p),
		.running = task_current(rq, p),
	};

	update_rq_clock(rq);
	if (ctx->queued)
		dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
	if (ctx->running)
		put_prev_task(rq, p);
}

void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
{
	struct rq *rq = task_rq(ctx->p);

	lockdep_assert_rq_held(rq);

	if (ctx->queued)
		enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
	if (ctx->running)
		set_next_task(rq, ctx->p);
}
#endif	/* CONFIG_SCHED_CLASS_EXT */