kernel/user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

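/*
 * Note: 4294967295U is U32_MAX, so each map above is a single identity
 * extent covering every valid ID (0 through U32_MAX - 1; U32_MAX itself
 * is the invalid-ID marker).  In the initial namespace, kuids and kgids
 * are therefore numerically identical to the userspace uids and gids.
 */
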
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

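/*
 * Worked example: with UIDHASH_BITS == 7 (UIDHASH_SZ == 128), uid 1000
 * hashes to ((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127 == 111.
 * Adding the shifted value folds the high bits into the bucket index,
 * so runs of consecutive uids still spread across different buckets.
 */
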
static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.  This is why this file uses
 * the irqsave/irq spinlock variants throughout rather than the _bh ones.
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

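/*
 * Drop a reference on a user_struct, freeing it when the count hits zero.
 * refcount_dec_and_lock_irqsave() makes the final decrement and the hash
 * removal atomic with respect to uid_hash_find(), so a lookup can never
 * take a reference on a user_struct that is already on its way out.
 */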
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}

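/*
 * Return the user_struct for @uid, allocating and hashing a new one if
 * none exists yet.  The GFP_KERNEL allocation may sleep, so it is done
 * with uidhash_lock dropped; the hash is then re-checked under the lock,
 * and if another task inserted the same uid in the meantime the losing
 * copy is freed and the winner's (already referenced) entry is returned.
 */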
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}

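/*
 * Typical caller pattern (a sketch modeled on set_user() in kernel/sys.c;
 * the error value is the caller's choice, not dictated by this file):
 *
 *	struct user_struct *new_user = alloc_uid(kuid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	...
 *	free_uid(new_user);	// drop the reference when done
 */
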
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);