/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting the BE scheduling class with prio 2 is done like so:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(IOPRIO_WHO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */

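/*
 * A minimal userspace sketch (illustrative only; the wrapper below is this
 * comment's own helper, not a glibc function -- glibc provides no
 * ioprio_set()/ioprio_get() wrappers, so the raw syscall is used, and the
 * constants are defined locally to mirror the kernel's <linux/ioprio.h>):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#define IOPRIO_WHO_PROCESS	1
 *	#define IOPRIO_CLASS_BE		2
 *	#define IOPRIO_CLASS_SHIFT	13
 *	#define IOPRIO_PRIO_VALUE(class, data)	\
 *		(((class) << IOPRIO_CLASS_SHIFT) | (data))
 *
 *	static int ioprio_set(int which, int who, int ioprio)
 *	{
 *		return syscall(SYS_ioprio_set, which, who, ioprio);
 *	}
 *
 *	// put process "pid" in the best-effort class at level 2
 *	int ret = ioprio_set(IOPRIO_WHO_PROCESS, pid,
 *			     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2));
 */
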
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>

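/*
 * Set @task's io priority to @ioprio.  Changing another task's priority
 * requires a matching uid/euid or CAP_SYS_NICE, and the security hook may
 * veto the change.  The io_context is allocated lazily here if the task
 * does not have one yet.
 */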
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (tcred->uid != cred->euid &&
	    tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	do {
		ioc = task->io_context;
		/* see wmb() in current_io_context() */
		smp_read_barrier_depends();
		if (ioc)
			break;

		ioc = alloc_io_context(GFP_ATOMIC, -1);
		if (!ioc) {
			err = -ENOMEM;
			break;
		}
		task->io_context = ioc;
	} while (1);

	if (!err) {
		ioc->ioprio = ioprio;
		ioc->ioprio_changed = 1;
	}

	task_unlock(task);
	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

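/*
 * ioprio_set(2): validate the requested class/level, then apply it to a
 * single task, every member of a process group, or all tasks owned by a
 * user, depending on @which.  Only CAP_SYS_ADMIN may select the RT class.
 */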
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	int ret;

	switch (class) {
	case IOPRIO_CLASS_RT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* fall through, rt has prio field too */
	case IOPRIO_CLASS_BE:
		if (data >= IOPRIO_BE_NR || data < 0)
			return -EINVAL;

		break;
	case IOPRIO_CLASS_IDLE:
		break;
	case IOPRIO_CLASS_NONE:
		if (data)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = set_task_ioprio(p, ioprio);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			ret = set_task_ioprio(p, ioprio);
			if (ret)
				break;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		if (!who)
			user = current_user();
		else
			user = find_user(who);

		if (!user)
			break;

		do_each_thread(g, p) {
			/*
			 * Compare against user->uid so that who == 0 means
			 * "the calling user", matching ioprio_get() below.
			 */
			if (__task_cred(p)->uid != user->uid)
				continue;
			ret = set_task_ioprio(p, ioprio);
			if (ret)
				goto free_uid;
		} while_each_thread(g, p);
free_uid:
		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}

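/*
 * Return @p's io priority, or the default IOPRIO_CLASS_NONE/IOPRIO_NORM
 * value if no io_context (and hence no explicit priority) exists yet.
 */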
static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	if (p->io_context)
		ret = p->io_context->ioprio;
out:
	return ret;
}

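/*
 * Return the stronger of two priorities: the lower-numbered class wins,
 * and within a class the lower (more important) level wins.  Unset
 * (IOPRIO_CLASS_NONE) priorities are treated as best-effort here.
 */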
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (aclass == IOPRIO_CLASS_NONE)
		aclass = IOPRIO_CLASS_BE;
	if (bclass == IOPRIO_CLASS_NONE)
		bclass = IOPRIO_CLASS_BE;

	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}

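/*
 * ioprio_get(2): for a single task, return its io priority.  For a
 * process group or a user, return the strongest priority found among
 * the matching tasks, as computed by ioprio_best().
 */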
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	int ret = -ESRCH;
	int tmpio;

	rcu_read_lock();
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = get_task_ioprio(p);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		if (!who)
			user = current_user();
		else
			user = find_user(who);

		if (!user)
			break;

		do_each_thread(g, p) {
			if (__task_cred(p)->uid != user->uid)
				continue;
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_thread(g, p);

		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}