Linux 5.8-rc4
kernel/sched/stop_task.c
// SPDX-License-Identifier: GPL-2.0
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
#include "sched.h"
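
/*
 * Usage sketch (illustrative only): nothing selects this class directly.
 * Callers queue work through the cpu_stop API in kernel/stop_machine.c
 * (declared in <linux/stop_machine.h>), which wakes the per-CPU stopper
 * thread (rq->stop); that thread is the only task this class ever
 * schedules.  A minimal example, with the callback name my_stop_fn()
 * made up for illustration:
 *
 *        static int my_stop_fn(void *arg)
 *        {
 *                // runs on the target CPU, preempted by nothing
 *                return 0;
 *        }
 *
 *        // execute my_stop_fn() on CPU 3 and wait for it to complete
 *        stop_one_cpu(3, my_stop_fn, NULL);
 */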

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
        return task_cpu(p); /* stop tasks never migrate */
}

static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        return sched_stop_runnable(rq);
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
        /* we're never preempted */
}

static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
        stop->se.exec_start = rq_clock_task(rq);
}

static struct task_struct *pick_next_task_stop(struct rq *rq)
{
        if (!sched_stop_runnable(rq))
                return NULL;

        set_next_task_stop(rq, rq->stop, true);
        return rq->stop;
}

static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
        BUG(); /* the stop task should never yield, it's pointless. */
}

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq_clock_task(rq);
        cgroup_account_cputime(curr, delta_exec);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
        BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG(); /* how!?, what priority? */
}

static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
        return 0;
}

static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
        .next                   = &dl_sched_class,

        .enqueue_task           = enqueue_task_stop,
        .dequeue_task           = dequeue_task_stop,
        .yield_task             = yield_task_stop,

        .check_preempt_curr     = check_preempt_curr_stop,

        .pick_next_task         = pick_next_task_stop,
        .put_prev_task          = put_prev_task_stop,
        .set_next_task          = set_next_task_stop,

#ifdef CONFIG_SMP
        .balance                = balance_stop,
        .select_task_rq         = select_task_rq_stop,
        .set_cpus_allowed       = set_cpus_allowed_common,
#endif

        .task_tick              = task_tick_stop,

        .get_rr_interval        = get_rr_interval_stop,

        .prio_changed           = prio_changed_stop,
        .switched_to            = switched_to_stop,
        .update_curr            = update_curr_stop,
};
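
/*
 * Background note (sketch, see kernel/stop_machine.c): tasks never move
 * into this class through the usual sched_setscheduler() paths, which is
 * why switched_to_stop() and prio_changed_stop() are BUG()s.  Instead the
 * per-CPU stopper kthread is handed to the scheduler via
 * sched_set_stop_task(), which installs it as rq->stop and attaches it to
 * stop_sched_class.
 */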