/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting the BE scheduling class with prio 2 is done like so:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */
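/*
 * A minimal userspace sketch, for illustration only (not part of this file):
 * glibc provides no wrappers for the ioprio syscalls, so they are normally
 * invoked through syscall(2). The IOPRIO_* constants are assumed to be
 * copied by the user program from include/linux/ioprio.h, since the header
 * is not necessarily exported to userspace.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *	// who == 0 addresses the calling task for IOPRIO_WHO_PROCESS
 *	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
 *		perror("ioprio_set");
 */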
#include <linux/kernel.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (tcred->uid != cred->euid &&
	    tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	do {
		ioc = task->io_context;
		/* see wmb() in current_io_context() */
		smp_read_barrier_depends();
		if (ioc)
			break;

		ioc = alloc_io_context(GFP_ATOMIC, -1);
		if (!ioc) {
			err = -ENOMEM;
			break;
		}
		task->io_context = ioc;
	} while (1);

	if (!err) {
		ioc->ioprio = ioprio;
		ioc->ioprio_changed = 1;
	}

	task_unlock(task);
	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	int ret;

	switch (class) {
	case IOPRIO_CLASS_RT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* fall through, rt has prio field too */
	case IOPRIO_CLASS_BE:
		if (data >= IOPRIO_BE_NR || data < 0)
			return -EINVAL;

		break;
	case IOPRIO_CLASS_IDLE:
		break;
	case IOPRIO_CLASS_NONE:
		if (data)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	ret = -ESRCH;
	/*
	 * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
	 * so we can't use rcu_read_lock(). See re-copy of ->ioprio
	 * in copy_process().
	 */
	read_lock(&tasklist_lock);
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = set_task_ioprio(p, ioprio);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			ret = set_task_ioprio(p, ioprio);
			if (ret)
				break;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		if (!who)
			user = current_user();
		else
			user = find_user(who);

		if (!user)
			break;

		do_each_thread(g, p) {
			if (__task_cred(p)->uid != who)
				continue;
			ret = set_task_ioprio(p, ioprio);
			if (ret)
				goto free_uid;
		} while_each_thread(g, p);
free_uid:
		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	read_unlock(&tasklist_lock);
	return ret;
}
static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	if (p->io_context)
		ret = p->io_context->ioprio;
out:
	return ret;
}
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (aclass == IOPRIO_CLASS_NONE)
		aclass = IOPRIO_CLASS_BE;
	if (bclass == IOPRIO_CLASS_NONE)
		bclass = IOPRIO_CLASS_BE;

	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	int ret = -ESRCH;
	int tmpio;

	read_lock(&tasklist_lock);
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = get_task_ioprio(p);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		if (!who)
			user = current_user();
		else
			user = find_user(who);

		if (!user)
			break;

		do_each_thread(g, p) {
			if (__task_cred(p)->uid != user->uid)
				continue;
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_thread(g, p);

		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	read_unlock(&tasklist_lock);
	return ret;
}