kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS		(CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ		(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))
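
/*
 * Worked example (illustrative, not in the original source): with
 * CONFIG_BASE_SMALL unset, UIDHASH_BITS is 8 and UIDHASH_MASK is 255,
 * so for uid 1000:
 *
 *	__uidhashfn(1000) = ((1000 >> 8) + 1000) & 255
 *	                  = (3 + 1000) & 255
 *	                  = 235
 *
 * i.e. uid 1000 lands in bucket 235 of uidhash_table[].
 */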
static struct kmem_cache *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
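
/*
 * root_user is the statically allocated user_struct for UID 0.  It starts
 * out with one reference and one attached process (init already runs as
 * root), so it is usable before uid_cache_init() has set up the slab cache.
 */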
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}
	return NULL;
}
/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
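
/*
 * Drop a reference on a user_struct, freeing it once the last reference
 * is gone.  Per the locking comment above, this may be called with local
 * interrupts already disabled, hence the local_irq_save() plus
 * atomic_dec_and_lock() dance rather than a bh-disabling lock.
 */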
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		spin_unlock_irqrestore(&uidhash_lock, flags);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
	} else {
		local_irq_restore(flags);
	}
}
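
/*
 * Find the user_struct for the given UID, allocating and hashing a new
 * one if none exists yet.  Returns with a reference held, or NULL if the
 * allocation or keyring setup failed.
 */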
struct user_struct *alloc_uid(uid_t uid)
{
	struct list_head *hashent = uidhashentry(uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}
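
/*
 * Move the current task from its old user_struct over to new_user,
 * updating the per-user process counts and keyrings, and dropping the
 * reference the task held on the old user.
 */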
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
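
/*
 * Boot-time setup: create the uid_cache slab, initialise the hash table
 * buckets, then hash the statically allocated root_user.
 */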
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);
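
/*
 * Illustrative usage sketch (not part of the original file): a caller such
 * as the fork path pins a user_struct for the lifetime of a task roughly
 * like this -- the error value is hypothetical:
 *
 *	struct user_struct *u = alloc_uid(uid);	// takes a reference
 *	if (!u)
 *		return -EAGAIN;
 *	...
 *	free_uid(u);				// drops it again
 */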