Linux 4.8.3: block/ioprio.c
/*
 * block/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * In other words, setting the BE scheduling class with prio 2 is done like so:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */
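/*
 * A minimal userspace sketch of the call sequence described above. This is
 * an illustrative addition, not part of the original file: glibc ships no
 * ioprio_set()/ioprio_get() wrappers, so the raw syscalls are used here, and
 * the constants are copied from include/linux/ioprio.h for this kernel
 * version.
 *
 *    #include <stdio.h>
 *    #include <sys/syscall.h>
 *    #include <unistd.h>
 *
 *    #define IOPRIO_CLASS_SHIFT  13
 *    #define IOPRIO_CLASS_BE     2   // best-effort class
 *    #define IOPRIO_WHO_PROCESS  1   // "who" names a single process
 *
 *    int main(void)
 *    {
 *            // best-effort class, level 2, for the calling process (who == 0)
 *            int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 *            if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
 *                    perror("ioprio_set");
 *            printf("ioprio is now %ld\n",
 *                   syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
 *            return 0;
 *    }
 */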
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
/*
 * Set @task's io priority. The target's uid must match the caller's real or
 * effective uid, or the caller must hold CAP_SYS_NICE; the change is also
 * gated by the security_task_setioprio() LSM hook.
 */
int set_task_ioprio(struct task_struct *task, int ioprio)
{
        int err;
        struct io_context *ioc;
        const struct cred *cred = current_cred(), *tcred;

        rcu_read_lock();
        tcred = __task_cred(task);
        if (!uid_eq(tcred->uid, cred->euid) &&
            !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();

        err = security_task_setioprio(task, ioprio);
        if (err)
                return err;

        ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
        if (ioc) {
                ioc->ioprio = ioprio;
                put_io_context(ioc);
        }

        return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
/*
 * ioprio_set(2): validate the requested class/data pair, then apply it to a
 * single process, to every member of a process group, or to every process
 * owned by a given user, depending on @which.
 */
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
        int class = IOPRIO_PRIO_CLASS(ioprio);
        int data = IOPRIO_PRIO_DATA(ioprio);
        struct task_struct *p, *g;
        struct user_struct *user;
        struct pid *pgrp;
        kuid_t uid;
        int ret;

        switch (class) {
        case IOPRIO_CLASS_RT:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                /* fall through, rt has prio field too */
        case IOPRIO_CLASS_BE:
                if (data >= IOPRIO_BE_NR || data < 0)
                        return -EINVAL;

                break;
        case IOPRIO_CLASS_IDLE:
                break;
        case IOPRIO_CLASS_NONE:
                if (data)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        ret = -ESRCH;
        rcu_read_lock();
        switch (which) {
        case IOPRIO_WHO_PROCESS:
                if (!who)
                        p = current;
                else
                        p = find_task_by_vpid(who);
                if (p)
                        ret = set_task_ioprio(p, ioprio);
                break;
        case IOPRIO_WHO_PGRP:
                if (!who)
                        pgrp = task_pgrp(current);
                else
                        pgrp = find_vpid(who);
                do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                        ret = set_task_ioprio(p, ioprio);
                        if (ret)
                                break;
                } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                break;
        case IOPRIO_WHO_USER:
                uid = make_kuid(current_user_ns(), who);
                if (!uid_valid(uid))
                        break;
                if (!who)
                        user = current_user();
                else
                        user = find_user(uid);

                if (!user)
                        break;

                do_each_thread(g, p) {
                        if (!uid_eq(task_uid(p), uid) ||
                            !task_pid_vnr(p))
                                continue;
                        ret = set_task_ioprio(p, ioprio);
                        if (ret)
                                goto free_uid;
                } while_each_thread(g, p);
free_uid:
                if (who)
                        free_uid(user);
                break;
        default:
                ret = -EINVAL;
        }

        rcu_read_unlock();
        return ret;
}
/*
 * Return the io priority stored in @p's io_context, or the default value
 * (class NONE, normal level) if the task has no io_context yet.
 */
static int get_task_ioprio(struct task_struct *p)
{
        int ret;

        ret = security_task_getioprio(p);
        if (ret)
                goto out;
        ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
        task_lock(p);
        if (p->io_context)
                ret = p->io_context->ioprio;
        task_unlock(p);
out:
        return ret;
}
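/*
 * Worked example for ioprio_best() below (added for illustration, not part
 * of the original file). IOPRIO_PRIO_VALUE(class, data) packs the class into
 * the high bits, so within valid data values a smaller class number yields a
 * smaller composite value, i.e. a higher priority:
 *
 *    ioprio_best(IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 7),
 *                IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0))
 *        returns the RT value, since RT outranks BE regardless of data;
 *
 *    ioprio_best(IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4),
 *                IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2))
 *        returns the BE/2 value: same class, so min() picks the lower data,
 *        which is the higher priority.
 */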
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
        unsigned short aclass;
        unsigned short bclass;

        if (!ioprio_valid(aprio))
                aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (!ioprio_valid(bprio))
                bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        aclass = IOPRIO_PRIO_CLASS(aprio);
        bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
                return min(aprio, bprio);
        if (aclass > bclass)
                return bprio;
        else
                return aprio;
}
/*
 * ioprio_get(2): report the io priority of a single process, or the highest
 * priority found across a process group or across all processes owned by a
 * user, combining values with ioprio_best().
 */
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        struct pid *pgrp;
        kuid_t uid;
        int ret = -ESRCH;
        int tmpio;

        rcu_read_lock();
        switch (which) {
        case IOPRIO_WHO_PROCESS:
                if (!who)
                        p = current;
                else
                        p = find_task_by_vpid(who);
                if (p)
                        ret = get_task_ioprio(p);
                break;
        case IOPRIO_WHO_PGRP:
                if (!who)
                        pgrp = task_pgrp(current);
                else
                        pgrp = find_vpid(who);
                do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                        tmpio = get_task_ioprio(p);
                        if (tmpio < 0)
                                continue;
                        if (ret == -ESRCH)
                                ret = tmpio;
                        else
                                ret = ioprio_best(ret, tmpio);
                } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                break;
        case IOPRIO_WHO_USER:
                uid = make_kuid(current_user_ns(), who);
                if (!who)
                        user = current_user();
                else
                        user = find_user(uid);

                if (!user)
                        break;

                do_each_thread(g, p) {
                        if (!uid_eq(task_uid(p), user->uid) ||
                            !task_pid_vnr(p))
                                continue;
                        tmpio = get_task_ioprio(p);
                        if (tmpio < 0)
                                continue;
                        if (ret == -ESRCH)
                                ret = tmpio;
                        else
                                ret = ioprio_best(ret, tmpio);
                } while_each_thread(g, p);

                if (who)
                        free_uid(user);
                break;
        default:
                ret = -EINVAL;
        }

        rcu_read_unlock();
        return ret;
}