/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
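
/*
 * A worked example of the hash, assuming the usual !CONFIG_BASE_SMALL
 * UIDHASH_BITS value of 8 from <linux/user_namespace.h> (i.e. a
 * 256-entry table):
 *
 *	__uidhashfn(1000) = ((1000 >> 8) + 1000) & 255
 *			  = (3 + 1000) & 255
 *			  = 235
 *
 * Folding the high bits in keeps UIDs that differ only above bit 7
 * (0, 256, 512, ...) from all colliding in bucket 0.
 */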
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
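
/*
 * Concretely (a sketch of the rule above, not new code): the hash is
 * always taken with the irq-saving variants, e.g.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	...walk or modify the hash...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * which is safe from softirq context and from callers that already run
 * with local interrupts disabled; spin_lock_bh()/spin_unlock_bh() would
 * not be, for the reason given above.
 */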
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_FAIR_USER_SCHED */
#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* return cpu shares held by the user */
ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);

	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}
/* modify cpu shares held by the user */
ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);
	unsigned long shares;
	int rc;

	/* reject malformed input rather than using an uninitialized value */
	if (sscanf(buffer, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}
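
/*
 * Illustrative userspace view (uid 1000 and the values are made-up
 * examples): the two handlers above back a read/write sysfs file, so
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * doubles the CPU share of uid 1000's task group.
 */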
static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
	sa->attr.name = name;
	sa->attr.mode = mode;
	sa->show = cpu_shares_show;
	sa->store = cpu_shares_store;
}
/* Create "/sys/kernel/uids/<uid>" directory and
 *	"/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
	struct kset *kset = &up->kset;
	struct kobject *kobj = &kset->kobj;
	int error;

	memset(kset, 0, sizeof(struct kset));
	kobj->parent = &uids_kobject;	/* create under /sys/kernel/uids dir */
	kobject_set_name(kobj, "%d", up->uid);
	kset_init(kset);
	user_attr_init(&up->user_attr, "cpu_share", 0644);

	error = kobject_add(kobj);
	if (error)
		goto done;

	error = sysfs_create_file(kobj, &up->user_attr.attr);
	if (error) {
		/* don't announce a kobject we just tore down */
		kobject_del(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);

done:
	return error;
}
/* create these entries in the sysfs filesystem:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
	int error;

	/* create under /sys/kernel dir */
	uids_kobject.parent = &kernel_subsys.kobj;
	uids_kobject.kset = &kernel_subsys;
	kobject_set_name(&uids_kobject, "uids");
	kobject_init(&uids_kobject);

	error = kobject_add(&uids_kobject);
	if (!error)
		error = user_kobject_create(&root_user);

	return error;
}
/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	struct kobject *kobj = &up->kset.kobj;
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	sysfs_remove_file(kobj, &up->user_attr.attr);
	kobject_uevent(kobj, KOBJ_REMOVE);
	kobject_del(kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
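
/*
 * The detour through a work item is deliberate: free_uid() may be called
 * with interrupts disabled, but sysfs_remove_file() and kobject_del()
 * can sleep, so the sysfs teardown must run later in process context.
 */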
#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif
/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
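
/*
 * Typical use of the two functions above (a sketch; "uid" is whatever
 * the caller wants to look up):
 *
 *	struct user_struct *up = find_user(uid);
 *
 *	if (up) {
 *		...inspect up->processes, up->sigpending, etc...
 *		free_uid(up);	- drops the reference find_user() took
 *	}
 */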
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new) {
			uids_mutex_unlock();
			return NULL;
		}
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (sched_create_user(new) < 0) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (user_kobject_create(new)) {
			sched_destroy_user(new);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;
}
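
/*
 * Caller-side sketch (set_user() in kernel/sys.c is the real consumer;
 * the error handling shown is illustrative):
 *
 *	struct user_struct *new_user;
 *
 *	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 */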
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);