// SPDX-License-Identifier: GPL-2.0
/*
 * General MIPS MT support routines, usable in AP/SP and SMVP.
 * Copyright (C) 2005 Mips Technologies, Inc
 */
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/security.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

static int fpaff_threshold = -1;
unsigned long mt_fpemul_threshold;

/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical with the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched/core.c changes.
 */

/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
 * cloned here.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) {
		retval = -EPERM;
		goto out_unlock;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
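	/*
	 * Editorial note: TIF_FPUBOUND is set outside this file, by the FP
	 * exception/emulation path, once a task's emulation count crosses
	 * mt_fpemul_threshold. Here it means the task should be confined to
	 * CPUs that have a hardware FPU whenever the requested mask allows.
	 */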
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
		cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
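
/*
 * Editorial sketch (illustrative, not part of the kernel source): per the
 * comment at the top of this file, the function above stands in for the
 * generic sys_sched_setaffinity() on MIPS MT FPAFF kernels, so ordinary
 * userspace affinity code exercises it unchanged. A minimal caller,
 * assuming the usual glibc wrappers:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);	// request CPU 0 only
 *	if (sched_setaffinity(0, sizeof(set), &set) != 0)
 *		perror("sched_setaffinity");
 *
 * If the target task is FPU-bound (TIF_FPUBOUND), the mask actually applied
 * is the intersection of the request with mt_fpu_cpumask when non-empty.
 */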

/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t allowed, mask;
	int retval;
	struct task_struct *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	get_online_cpus();
	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;
	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
	cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
	rcu_read_unlock();
	put_online_cpus();
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}
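
/*
 * Editorial sketch (illustrative, not part of the kernel source): reading
 * the affinity back from userspace. Note that the function above returns
 * the number of bytes written (real_len); glibc's sched_getaffinity()
 * wrapper maps that to 0 on success. Assuming the usual glibc API:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *	int i;
 *	if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *		for (i = 0; i < CPU_SETSIZE; i++)
 *			if (CPU_ISSET(i, &set))
 *				printf("cpu %d allowed\n", i);
 */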

static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;
}
__setup("fpaff=", fpaff_thresh);

/*
 * FPU Use Factor empirically derived from experiments on 34K
 */
#define FPUSEFACTOR 2000
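
/*
 * Editorial worked example (hypothetical numbers): the fallback computation
 * below is FPUSEFACTOR * (loops_per_jiffy / (500000/HZ)) / HZ, where
 * loops_per_jiffy/(500000/HZ) is the integer part of the kernel's BogoMIPS
 * figure. With HZ = 100 and loops_per_jiffy = 1000000:
 *
 *	2000 * (1000000 / 5000) / 100 = 2000 * 200 / 100 = 4000
 *
 * i.e. FPU affinity would kick in after 4000 emulated FP operations.
 */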

static __init int mt_fp_affinity_init(void)
{
	if (fpaff_threshold >= 0) {
		mt_fpemul_threshold = fpaff_threshold;
	} else {
		mt_fpemul_threshold =
			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
	}

	/* %lu matches the unsigned long type of mt_fpemul_threshold */
	printk(KERN_DEBUG "FPU Affinity set after %lu emulations\n",
	       mt_fpemul_threshold);

	return 0;
}
arch_initcall(mt_fp_affinity_init);