/*
 * block/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting the BE scheduling class with prio 2 is done like this:
 *
 *	unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 *	ioprio_set(IOPRIO_WHO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */
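
/*
 * Illustrative userspace sketch (not part of this file): glibc ships no
 * wrappers for ioprio_set(2)/ioprio_get(2), so a program normally invokes
 * them through syscall(2), with the IOPRIO_* class/shift constants defined
 * locally since they are not exported to userspace headers here:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 *	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
 *		perror("ioprio_set");
 *	int cur = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);
 */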
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
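
/*
 * Set @task's io priority. The caller must either own the target task
 * (its uid must match the caller's uid or euid) or have CAP_SYS_NICE, and
 * the security hook must not object. The priority is stored in the task's
 * io_context, which is allocated on demand (GFP_ATOMIC) if the task has
 * none yet; if that allocation fails, the new value is silently dropped.
 */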
int set_task_ioprio(struct task_struct *task, int ioprio)
{
        int err;
        struct io_context *ioc;
        const struct cred *cred = current_cred(), *tcred;

        rcu_read_lock();
        tcred = __task_cred(task);
        if (!uid_eq(tcred->uid, cred->euid) &&
            !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();

        err = security_task_setioprio(task, ioprio);
        if (err)
                return err;

        ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
        if (ioc) {
                ioc->ioprio = ioprio;
                put_io_context(ioc);
        }

        return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
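
/*
 * ioprio_set(2): validate the requested class/data pair (the RT class
 * requires CAP_SYS_ADMIN), then apply it to a single task, to every task
 * in a process group, or to every task owned by a user, depending on
 * @which.
 */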
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
        int class = IOPRIO_PRIO_CLASS(ioprio);
        int data = IOPRIO_PRIO_DATA(ioprio);
        struct task_struct *p, *g;
        struct user_struct *user;
        struct pid *pgrp;
        kuid_t uid;
        int ret;

        switch (class) {
        case IOPRIO_CLASS_RT:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                /* fall through, rt has prio field too */
        case IOPRIO_CLASS_BE:
                if (data >= IOPRIO_BE_NR || data < 0)
                        return -EINVAL;

                break;
        case IOPRIO_CLASS_IDLE:
                break;
        case IOPRIO_CLASS_NONE:
                if (data)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        ret = -ESRCH;
        rcu_read_lock();
        switch (which) {
        case IOPRIO_WHO_PROCESS:
                if (!who)
                        p = current;
                else
                        p = find_task_by_vpid(who);
                if (p)
                        ret = set_task_ioprio(p, ioprio);
                break;
        case IOPRIO_WHO_PGRP:
                if (!who)
                        pgrp = task_pgrp(current);
                else
                        pgrp = find_vpid(who);
                do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                        ret = set_task_ioprio(p, ioprio);
                        if (ret)
                                break;
                } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                break;
        case IOPRIO_WHO_USER:
                uid = make_kuid(current_user_ns(), who);
                if (!uid_valid(uid))
                        break;
                if (!who)
                        user = current_user();
                else
                        user = find_user(uid);

                if (!user)
                        break;

                do_each_thread(g, p) {
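                        /* only touch tasks owned by @uid that are still
                         * visible in the caller's pid namespace */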
                        if (!uid_eq(task_uid(p), uid) ||
                            !task_pid_vnr(p))
                                continue;
                        ret = set_task_ioprio(p, ioprio);
                        if (ret)
                                goto free_uid;
                } while_each_thread(g, p);
free_uid:
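                /* find_user() took a reference above; current_user() did not */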
                if (who)
                        free_uid(user);
                break;
        default:
                ret = -EINVAL;
        }

        rcu_read_unlock();
        return ret;
}
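
/*
 * Read @p's io priority: an error from the security hook is passed through
 * as a negative errno; a task with no io_context reports the
 * IOPRIO_CLASS_NONE/IOPRIO_NORM default; otherwise the value stored in the
 * io_context is returned.
 */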
static int get_task_ioprio(struct task_struct *p)
{
        int ret;

        ret = security_task_getioprio(p);
        if (ret)
                goto out;
        ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
        if (p->io_context)
                ret = p->io_context->ioprio;
out:
        return ret;
}
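
/*
 * Combine two priorities and return the "stronger" one: invalid values are
 * treated as the BE-class default, a numerically lower class wins, and
 * within the same class the lower (higher-priority) value wins.
 */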
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
        unsigned short aclass;
        unsigned short bclass;

        if (!ioprio_valid(aprio))
                aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (!ioprio_valid(bprio))
                bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        aclass = IOPRIO_PRIO_CLASS(aprio);
        bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
                return min(aprio, bprio);
        if (aclass > bclass)
                return bprio;
        else
                return aprio;
}
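
/*
 * ioprio_get(2): the counterpart to ioprio_set(2). For a process group or
 * a user, the result is the best (strongest) priority among all matching
 * tasks, folded together with ioprio_best().
 */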
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        struct pid *pgrp;
        kuid_t uid;
        int ret = -ESRCH;
        int tmpio;

        rcu_read_lock();
        switch (which) {
        case IOPRIO_WHO_PROCESS:
                if (!who)
                        p = current;
                else
                        p = find_task_by_vpid(who);
                if (p)
                        ret = get_task_ioprio(p);
                break;
        case IOPRIO_WHO_PGRP:
                if (!who)
                        pgrp = task_pgrp(current);
                else
                        pgrp = find_vpid(who);
                do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                        tmpio = get_task_ioprio(p);
                        if (tmpio < 0)
                                continue;
                        if (ret == -ESRCH)
                                ret = tmpio;
                        else
                                ret = ioprio_best(ret, tmpio);
                } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                break;
        case IOPRIO_WHO_USER:
                uid = make_kuid(current_user_ns(), who);
                if (!who)
                        user = current_user();
                else
                        user = find_user(uid);

                if (!user)
                        break;

                do_each_thread(g, p) {
                        if (!uid_eq(task_uid(p), user->uid) ||
                            !task_pid_vnr(p))
                                continue;
                        tmpio = get_task_ioprio(p);
                        if (tmpio < 0)
                                continue;
                        if (ret == -ESRCH)
                                ret = tmpio;
                        else
                                ret = ioprio_best(ret, tmpio);
                } while_each_thread(g, p);

                if (who)
                        free_uid(user);
                break;
        default:
                ret = -EINVAL;
        }

        rcu_read_unlock();
        return ret;
}