/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
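
/*
 * Worked example (editorial, not in the original source), assuming the
 * usual UIDHASH_BITS == 7, so UIDHASH_SZ == 128 and UIDHASH_MASK == 127:
 * uid 1000 hashes to ((1000 >> 7) + 1000) & 127 = 1007 & 127 = 111.
 * Folding the high bits in with the add spreads UIDs that differ only in
 * their high bits across buckets: plain masking would put 1000 and 1128
 * both in bucket 104, while __uidhashfn() maps them to 111 and 112.
 */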

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, which
 * the caller of free_uid() didn't expect.
 */
static DEFINE_SPINLOCK(uidhash_lock);
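
/*
 * Sketch of the constraint above (editorial, not in the original file):
 * spin_lock_bh() would be the usual way to be softirq-safe, but a caller
 * may invoke free_uid() like this:
 *
 *	local_irq_save(flags);
 *	...
 *	free_uid(up);
 *	...
 *	local_irq_restore(flags);
 *
 * and a spin_unlock_bh() inside free_uid() could run softirq callbacks
 * while interrupts are meant to be off. Hence every acquisition of
 * uidhash_lock below uses the _irq/_irqsave variants, which are
 * softirq-safe as a side effect of disabling interrupts outright.
 */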

/* root_user.__count is 2: 1 for the init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
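
/*
 * Editorial note (not in the original): the atomic_inc() above is safe
 * without any zero-check only because every caller holds uidhash_lock,
 * and free_uid() performs its final dec-to-zero under that same lock via
 * atomic_dec_and_lock(), so an entry found here always has a count >= 1.
 * The extra hlist_node cursor "h" is required by the four-argument
 * hlist_for_each_entry() signature of this kernel generation.
 */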

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}
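
/*
 * Editorial note (not in the original): removing the entry from the hash
 * is the only step that needs uidhash_lock, so the lock is dropped before
 * the key_put() and kmem_cache_free() calls to keep the IRQs-off critical
 * section short. By this point the refcount is zero and the entry is
 * unhashed, so no other CPU can find or re-reference it.
 */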

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
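
/*
 * Usage sketch (editorial, not part of this file): a caller must pair
 * find_user() with free_uid() to balance the reference taken above, e.g.
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		do_something(user);
 *		free_uid(user);
 *	}
 *
 * where do_something() is a stand-in for whatever per-user accounting
 * the caller performs.
 */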

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
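
/*
 * Editorial note (not in the original): atomic_dec_and_lock(&cnt, &lock)
 * behaves roughly like
 *
 *	if (atomic_add_unless(&cnt, -1, 1))
 *		return 0;		(fast path: count stayed >= 1)
 *	spin_lock(&lock);
 *	if (atomic_dec_and_test(&cnt))
 *		return 1;		(count hit 0; lock is still held)
 *	spin_unlock(&lock);
 *	return 0;
 *
 * so uidhash_lock is only taken when dropping the final reference, and
 * the dec-to-zero happens under the lock - which is what makes the
 * unlocked atomic_inc() in uid_hash_find() safe. Interrupts are disabled
 * first because free_user() must be entered with IRQs off (see above).
 */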

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/*
	 * The lock is dropped between this lookup and the insert below so
	 * that the allocation can sleep; a second lookup under the lock
	 * catches a racing insert of the same uid.
	 */
	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		new->user_ns = get_user_ns(ns);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* We lost the race: drop the namespace reference
			 * taken above and free our unused copy; the
			 * winner's entry is returned instead. */
			put_user_ns(new->user_ns);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}
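
/*
 * Editorial note (not in the original): alloc_uid() is the classic
 * optimistic-allocation pattern for sleeping allocations under a
 * non-sleeping lock: look up, drop the lock, allocate with GFP_KERNEL
 * (which may sleep), then re-take the lock and look up again - whoever
 * inserted first wins, and the loser frees its unused copy. The
 * alternative, a GFP_ATOMIC allocation while holding the spinlock,
 * would be far less reliable under memory pressure.
 */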

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
module_init(uid_cache_init);