/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
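
/*
 * For orientation: a sketch of the user_struct fields this file touches.
 * The authoritative definition lives in <linux/sched.h> and carries
 * additional members; only the fields used below are shown.
 *
 *	struct user_struct {
 *		atomic_t __count;		reference count
 *		atomic_t processes;		processes owned by this user
 *		atomic_t files;			open files owned by this user
 *		atomic_t sigpending;		pending signals
 *		struct list_head uidhash_list;	chain in uidhash_table
 *		uid_t uid;
 *	};
 */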

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_BITS		8
#define UIDHASH_SZ		(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))
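
/*
 * A worked example of the hash: for uid 1000,
 *
 *	((1000 >> 8) + 1000) & UIDHASH_MASK == (3 + 1000) & 255 == 235
 *
 * so uid 1000 chains off uidhash_table[235].  Folding the high bits into
 * the low ones spreads uids that differ only above the low 8 bits.
 */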

static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);

		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
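
/*
 * The pattern a caller of the helpers above follows, sketched from the
 * uses later in this file:
 *
 *	spin_lock(&uidhash_lock);
 *	uid_hash_insert(up, uidhashentry(up->uid));
 *	spin_unlock(&uidhash_lock);
 */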

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;

	spin_lock(&uidhash_lock);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock(&uidhash_lock);
	return ret;
}
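
/*
 * A minimal sketch of the intended get/put pairing (hypothetical caller):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... read u->processes, u->files, etc. ...
 *		free_uid(u);	(drops the ref find_user took)
 *	}
 */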

void free_uid(struct user_struct *up)
{
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		kmem_cache_free(uid_cachep, up);
		spin_unlock(&uidhash_lock);
	}
}

struct user_struct *alloc_uid(uid_t uid)
{
	struct list_head *hashent = uidhashentry(uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}
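
/*
 * A sketch of how a setuid()-style path would use this (hypothetical
 * caller; in the kernel proper this lives in places like set_user() in
 * kernel/sys.c):
 *
 *	struct user_struct *new_user = alloc_uid(new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 */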

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	current->user = new_user;
	free_uid(old_user);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);