/*
 * block/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting the BE scheduling class with prio 2 is done like so:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(IOPRIO_WHO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */
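
/*
 * Illustrative only: a minimal userspace sketch of driving this interface.
 * glibc does not provide ioprio_set()/ioprio_get() wrappers, so syscall(2)
 * is used directly; the constants are restated locally (their values mirror
 * include/linux/ioprio.h and this file) purely to keep the sketch
 * self-contained. It sets the calling process (who == 0) to BE class,
 * level 2, then reads the value back:
 *
 *      #include <stdio.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      #define IOPRIO_CLASS_SHIFT      13
 *      #define IOPRIO_CLASS_BE         2
 *      #define IOPRIO_WHO_PROCESS      1
 *
 *      int main(void)
 *      {
 *              int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 *              if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
 *                      perror("ioprio_set");
 *
 *              printf("ioprio: %ld\n",
 *                     syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
 *              return 0;
 *      }
 */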
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/cred.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/sched/user.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
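
/**
 * set_task_ioprio - set the io priority of a task
 * @task:   target task
 * @ioprio: new composite io priority (class plus per-class data)
 *
 * The target's uid must match the caller's uid or euid, or the caller must
 * hold CAP_SYS_NICE; the change must also pass the security_task_setioprio()
 * LSM hook. On success the value is stored in the task's io_context
 * (allocated here if the task does not have one yet). Returns 0 on success
 * or a negative errno.
 */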
int set_task_ioprio(struct task_struct *task, int ioprio)
{
        int err;
        struct io_context *ioc;
        const struct cred *cred = current_cred(), *tcred;

        rcu_read_lock();
        tcred = __task_cred(task);
        if (!uid_eq(tcred->uid, cred->euid) &&
            !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();

        err = security_task_setioprio(task, ioprio);
        if (err)
                return err;

        ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
        if (ioc) {
                ioc->ioprio = ioprio;
                put_io_context(ioc);
        }

        return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
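
/*
 * ioprio_set(2): apply @ioprio to a single task (IOPRIO_WHO_PROCESS), to
 * every task in a process group (IOPRIO_WHO_PGRP), or to every task owned
 * by a user (IOPRIO_WHO_USER); @who == 0 means the current task, process
 * group or user. The class/data encoding is validated first and the RT
 * class requires CAP_SYS_ADMIN. The walk stops at the first per-task
 * failure and returns that error, or -ESRCH if no matching task was found.
 */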
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
        int class = IOPRIO_PRIO_CLASS(ioprio);
        int data = IOPRIO_PRIO_DATA(ioprio);
        struct task_struct *p, *g;
        struct user_struct *user;
        struct pid *pgrp;
        kuid_t uid;
        int ret;

        switch (class) {
                case IOPRIO_CLASS_RT:
                        if (!capable(CAP_SYS_ADMIN))
                                return -EPERM;
                        /* fall through */
                        /* rt has prio field too */
                case IOPRIO_CLASS_BE:
                        if (data >= IOPRIO_BE_NR || data < 0)
                                return -EINVAL;

                        break;
                case IOPRIO_CLASS_IDLE:
                        break;
                case IOPRIO_CLASS_NONE:
                        if (data)
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
        }

        ret = -ESRCH;
        rcu_read_lock();
        switch (which) {
                case IOPRIO_WHO_PROCESS:
                        if (!who)
                                p = current;
                        else
                                p = find_task_by_vpid(who);
                        if (p)
                                ret = set_task_ioprio(p, ioprio);
                        break;
                case IOPRIO_WHO_PGRP:
                        if (!who)
                                pgrp = task_pgrp(current);
                        else
                                pgrp = find_vpid(who);
                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                ret = set_task_ioprio(p, ioprio);
                                if (ret)
                                        break;
                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                        break;
                case IOPRIO_WHO_USER:
                        uid = make_kuid(current_user_ns(), who);
                        if (!uid_valid(uid))
                                break;
                        if (!who)
                                user = current_user();
                        else
                                user = find_user(uid);

                        if (!user)
                                break;

                        for_each_process_thread(g, p) {
                                if (!uid_eq(task_uid(p), uid) ||
                                    !task_pid_vnr(p))
                                        continue;
                                ret = set_task_ioprio(p, ioprio);
                                if (ret)
                                        goto free_uid;
                        }
free_uid:
                        if (who)
                                free_uid(user);
                        break;
                default:
                        ret = -EINVAL;
        }

        rcu_read_unlock();
        return ret;
}
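
/*
 * get_task_ioprio - read back a task's io priority
 * @p: task to query
 *
 * Returns the error from the security_task_getioprio() LSM hook if that
 * check fails, the ioprio stored in the task's io_context if it has one,
 * or the IOPRIO_CLASS_NONE/IOPRIO_NORM default otherwise.
 */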
static int get_task_ioprio(struct task_struct *p)
{
        int ret;

        ret = security_task_getioprio(p);
        if (ret)
                goto out;
        ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
        task_lock(p);
        if (p->io_context)
                ret = p->io_context->ioprio;
        task_unlock(p);
out:
        return ret;
}
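
/*
 * ioprio_best() returns the stronger of two io priorities. The class lives
 * in the bits above IOPRIO_CLASS_SHIFT and lower values mean higher
 * priority, so a plain min() on the composite value picks the right one.
 * A worked example, assuming IOPRIO_CLASS_SHIFT == 13 as defined in
 * include/linux/ioprio.h:
 *
 *      IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4) == (1 << 13) | 4 == 0x2004
 *      IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0) == (2 << 13) | 0 == 0x4000
 *
 * min(0x2004, 0x4000) == 0x2004, so the RT entry wins even though its
 * per-class level (4) is numerically larger than the BE level (0).
 */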
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
        if (!ioprio_valid(aprio))
                aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (!ioprio_valid(bprio))
                bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        return min(aprio, bprio);
}
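
/*
 * ioprio_get(2): report the io priority for a task, a process group or a
 * user, using the same @which/@who conventions as ioprio_set(2). When @who
 * covers more than one task, the strongest priority found (per
 * ioprio_best()) is returned; -ESRCH is returned if no matching task
 * exists.
 */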
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        struct pid *pgrp;
        kuid_t uid;
        int ret = -ESRCH;
        int tmpio;

        rcu_read_lock();
        switch (which) {
                case IOPRIO_WHO_PROCESS:
                        if (!who)
                                p = current;
                        else
                                p = find_task_by_vpid(who);
                        if (p)
                                ret = get_task_ioprio(p);
                        break;
                case IOPRIO_WHO_PGRP:
                        if (!who)
                                pgrp = task_pgrp(current);
                        else
                                pgrp = find_vpid(who);
                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                tmpio = get_task_ioprio(p);
                                if (tmpio < 0)
                                        continue;
                                if (ret == -ESRCH)
                                        ret = tmpio;
                                else
                                        ret = ioprio_best(ret, tmpio);
                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                        break;
                case IOPRIO_WHO_USER:
                        uid = make_kuid(current_user_ns(), who);
                        if (!who)
                                user = current_user();
                        else
                                user = find_user(uid);

                        if (!user)
                                break;

                        for_each_process_thread(g, p) {
                                if (!uid_eq(task_uid(p), user->uid) ||
                                    !task_pid_vnr(p))
                                        continue;
                                tmpio = get_task_ioprio(p);
                                if (tmpio < 0)
                                        continue;
                                if (ret == -ESRCH)
                                        ret = tmpio;
                                else
                                        ret = ioprio_best(ret, tmpio);
                        }

                        if (who)
                                free_uid(user);
                        break;
                default:
                        ret = -EINVAL;
        }

        rcu_read_unlock();
        return ret;
}
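
/*
 * Illustrative only: a userspace sketch of decoding an ioprio_get(2) return
 * value. The shift and mask restate IOPRIO_PRIO_CLASS()/IOPRIO_PRIO_DATA()
 * from include/linux/ioprio.h so the sketch stands alone:
 *
 *      #define IOPRIO_CLASS_SHIFT      13
 *      #define IOPRIO_PRIO_MASK        ((1UL << IOPRIO_CLASS_SHIFT) - 1)
 *
 *      long ioprio = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);
 *      int class = ioprio >> IOPRIO_CLASS_SHIFT;
 *      int level = ioprio & IOPRIO_PRIO_MASK;
 *
 * where class is 0 (none), 1 (rt), 2 (be) or 3 (idle) and level runs from
 * 0 (highest) to 7 (lowest) within the rt and be classes.
 */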