kernel/sched/core_sched.c
// SPDX-License-Identifier: GPL-2.0-only

/*
 * A simple wrapper around refcount. An allocated sched_core_cookie's
 * address is used to compute the cookie of the task.
 */
struct sched_core_cookie {
        refcount_t refcnt;
};
static unsigned long sched_core_alloc_cookie(void)
{
        struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
        if (!ck)
                return 0;

        refcount_set(&ck->refcnt, 1);
        sched_core_get();

        return (unsigned long)ck;
}
static void sched_core_put_cookie(unsigned long cookie)
{
        struct sched_core_cookie *ptr = (void *)cookie;

        if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
                kfree(ptr);
                sched_core_put();
        }
}
static unsigned long sched_core_get_cookie(unsigned long cookie)
{
        struct sched_core_cookie *ptr = (void *)cookie;

        if (ptr)
                refcount_inc(&ptr->refcnt);

        return cookie;
}
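
/*
 * Reference rules for the helpers above: sched_core_alloc_cookie() returns a
 * cookie holding one reference, sched_core_get_cookie() takes an additional
 * reference, and sched_core_put_cookie() drops one, freeing the cookie (and
 * releasing core scheduling) when the last reference goes away.
 */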
/*
 * sched_core_update_cookie - replace the cookie on a task
 * @p: the task to update
 * @cookie: the new cookie
 *
 * Effectively exchange the task cookie; caller is responsible for lifetimes on
 * both ends.
 *
 * Returns: the old cookie
 */
static unsigned long sched_core_update_cookie(struct task_struct *p,
                                              unsigned long cookie)
{
        unsigned long old_cookie;
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        /*
         * Since creating a cookie implies sched_core_get(), and we cannot set
         * a cookie until after we've created it, similarly, we cannot destroy
         * a cookie until after we've removed it, we must have core scheduling
         * enabled here.
         */
        SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));

        if (sched_core_enqueued(p))
                sched_core_dequeue(rq, p, DEQUEUE_SAVE);

        old_cookie = p->core_cookie;
        p->core_cookie = cookie;

        /*
         * Consider the cases: !prev_cookie and !cookie.
         */
        if (cookie && task_on_rq_queued(p))
                sched_core_enqueue(rq, p);

        /*
         * If task is currently running, it may not be compatible anymore after
         * the cookie change, so enter the scheduler on its CPU to schedule it
         * away.
         *
         * Note that it is possible that as a result of this cookie change, the
         * core has now entered/left forced idle state. Defer accounting to the
         * next scheduling edge, rather than always forcing a reschedule here.
         */
        if (task_on_cpu(rq, p))
                resched_curr(rq);

        task_rq_unlock(rq, p, &rf);

        return old_cookie;
}
static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
        unsigned long cookie, flags;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        cookie = sched_core_get_cookie(p->core_cookie);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

        return cookie;
}
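
/* A newly forked child starts with its own reference to the parent's cookie. */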
void sched_core_fork(struct task_struct *p)
{
        RB_CLEAR_NODE(&p->core_node);
        p->core_cookie = sched_core_clone_cookie(current);
}
void sched_core_free(struct task_struct *p)
{
        sched_core_put_cookie(p->core_cookie);
}
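
/*
 * Swap in a new cookie on @p: take a reference on @cookie, install it on the
 * task, and drop the reference the task held on its previous cookie.
 */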
static void __sched_core_set(struct task_struct *p, unsigned long cookie)
{
        cookie = sched_core_get_cookie(cookie);
        cookie = sched_core_update_cookie(p, cookie);
        sched_core_put_cookie(cookie);
}

/* Called from prctl interface: PR_SCHED_CORE */
int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
                         unsigned long uaddr)
{
        unsigned long cookie = 0, id = 0;
        struct task_struct *task, *p;
        struct pid *grp;
        int err = 0;

        if (!static_branch_likely(&sched_smt_present))
                return -ENODEV;

        BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD != PIDTYPE_PID);
        BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD_GROUP != PIDTYPE_TGID);
        BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_PROCESS_GROUP != PIDTYPE_PGID);

        if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||
            (cmd != PR_SCHED_CORE_GET && uaddr))
                return -EINVAL;

        rcu_read_lock();
        if (pid == 0) {
                task = current;
        } else {
                task = find_task_by_vpid(pid);
                if (!task) {
                        rcu_read_unlock();
                        return -ESRCH;
                }
        }
        get_task_struct(task);
        rcu_read_unlock();

        /*
         * Check if this process has the right to modify the specified
         * process. Use the regular "ptrace_may_access()" checks.
         */
        if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
                err = -EPERM;
                goto out;
        }

        switch (cmd) {
        case PR_SCHED_CORE_GET:
                if (type != PIDTYPE_PID || uaddr & 7) {
                        err = -EINVAL;
                        goto out;
                }
                cookie = sched_core_clone_cookie(task);
                if (cookie) {
                        /* XXX improve ? */
                        ptr_to_hashval((void *)cookie, &id);
                }
                err = put_user(id, (u64 __user *)uaddr);
                goto out;

        case PR_SCHED_CORE_CREATE:
                cookie = sched_core_alloc_cookie();
                if (!cookie) {
                        err = -ENOMEM;
                        goto out;
                }
                break;

        case PR_SCHED_CORE_SHARE_TO:
                cookie = sched_core_clone_cookie(current);
                break;

        case PR_SCHED_CORE_SHARE_FROM:
                if (type != PIDTYPE_PID) {
                        err = -EINVAL;
                        goto out;
                }
                cookie = sched_core_clone_cookie(task);
                __sched_core_set(current, cookie);
                goto out;

        default:
                err = -EINVAL;
                goto out;
        }

        if (type == PIDTYPE_PID) {
                __sched_core_set(task, cookie);
                goto out;
        }

        read_lock(&tasklist_lock);
        grp = task_pid_type(task, type);

        do_each_pid_thread(grp, type, p) {
                if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) {
                        err = -EPERM;
                        goto out_tasklist;
                }
        } while_each_pid_thread(grp, type, p);

        do_each_pid_thread(grp, type, p) {
                __sched_core_set(p, cookie);
        } while_each_pid_thread(grp, type, p);
out_tasklist:
        read_unlock(&tasklist_lock);

out:
        sched_core_put_cookie(cookie);
        put_task_struct(task);
        return err;
}
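
/*
 * Userspace usage sketch (illustrative, not part of this file): the handler
 * above is reached via prctl(2) with PR_SCHED_CORE. The command and scope
 * constants come from <linux/prctl.h>; "tid" is a hypothetical thread id and
 * error handling is omitted.
 *
 *      // Tag the calling thread group with a freshly allocated cookie.
 *      prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
 *            PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
 *
 *      // Push the caller's cookie onto another thread so both may share a core.
 *      prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, tid,
 *            PR_SCHED_CORE_SCOPE_THREAD, 0);
 *
 *      // Read back a hashed id of the caller's cookie into an 8-byte-aligned u64.
 *      unsigned long long id;
 *      prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0,
 *            PR_SCHED_CORE_SCOPE_THREAD, (unsigned long)&id);
 */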

#ifdef CONFIG_SCHEDSTATS

/* REQUIRES: rq->core's clock recently updated. */
void __sched_core_account_forceidle(struct rq *rq)
{
        const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
        u64 delta, now = rq_clock(rq->core);
        struct rq *rq_i;
        struct task_struct *p;
        int i;

        lockdep_assert_rq_held(rq);

        WARN_ON_ONCE(!rq->core->core_forceidle_count);

        if (rq->core->core_forceidle_start == 0)
                return;

        delta = now - rq->core->core_forceidle_start;
        if (unlikely((s64)delta <= 0))
                return;

        rq->core->core_forceidle_start = now;

        if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) {
                /* can't be forced idle without a running task */
        } else if (rq->core->core_forceidle_count > 1 ||
                   rq->core->core_forceidle_occupation > 1) {
                /*
                 * For larger SMT configurations, we need to scale the charged
                 * forced idle amount since there can be more than one forced
                 * idle sibling and more than one running cookied task.
                 */
                delta *= rq->core->core_forceidle_count;
                delta = div_u64(delta, rq->core->core_forceidle_occupation);
        }
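
        /*
         * Illustrative numbers: on an SMT-4 core with three siblings forced
         * idle and one cookied task running, count = 3 and occupation = 1,
         * so that one task is charged 3 * delta. With two runners and two
         * forced-idle siblings, each runner is charged delta. In both cases
         * the total charged equals the core's forced-idle CPU time for the
         * interval.
         */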

        for_each_cpu(i, smt_mask) {
                rq_i = cpu_rq(i);
                p = rq_i->core_pick ?: rq_i->curr;

                if (p == rq_i->idle)
                        continue;

                /*
                 * Note: this will account forceidle to the current CPU, even
                 * if it comes from our SMT sibling.
                 */
                __account_forceidle_time(p, delta);
        }
}

void __sched_core_tick(struct rq *rq)
{
        if (!rq->core->core_forceidle_count)
                return;

        if (rq != rq->core)
                update_rq_clock(rq->core);

        __sched_core_account_forceidle(rq);
}

#endif /* CONFIG_SCHEDSTATS */