kernel/ucount.c

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/user_namespace.h>

#define UCOUNTS_HASHTABLE_BITS 10
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
static DEFINE_SPINLOCK(ucounts_lock);

#define ucounts_hashfn(ns, uid)						\
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid)	\
	(ucounts_hashtable + ucounts_hashfn(ns, uid))

#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
{
	return &current_user_ns()->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &current_user_ns()->set == set;
}

static int set_permissions(struct ctl_table_header *head,
			   struct ctl_table *table)
{
	struct user_namespace *user_ns =
		container_of(head->set, struct user_namespace, set);
	int mode;

	/* Allow users with CAP_SYS_RESOURCE unrestrained access */
	if (ns_capable(user_ns, CAP_SYS_RESOURCE))
		mode = (table->mode & S_IRWXU) >> 6;
	else
		/* Allow all others at most read-only access */
		mode = table->mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = set_permissions,
};

#define UCOUNT_ENTRY(name)				\
	{						\
		.procname = name,			\
		.maxlen = sizeof(int),			\
		.mode = 0644,				\
		.proc_handler = proc_dointvec_minmax,	\
		.extra1 = SYSCTL_ZERO,			\
		.extra2 = SYSCTL_INT_MAX,		\
	}
static struct ctl_table user_table[] = {
	UCOUNT_ENTRY("max_user_namespaces"),
	UCOUNT_ENTRY("max_pid_namespaces"),
	UCOUNT_ENTRY("max_uts_namespaces"),
	UCOUNT_ENTRY("max_ipc_namespaces"),
	UCOUNT_ENTRY("max_net_namespaces"),
	UCOUNT_ENTRY("max_mnt_namespaces"),
	UCOUNT_ENTRY("max_cgroup_namespaces"),
#ifdef CONFIG_INOTIFY_USER
	UCOUNT_ENTRY("max_inotify_instances"),
	UCOUNT_ENTRY("max_inotify_watches"),
#endif
	{ }
};
#endif /* CONFIG_SYSCTL */
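
/*
 * Give each user namespace its own copy of user_table, with every entry
 * pointing at that namespace's ucount_max[] slot, and register it under
 * the "user" sysctl directory (/proc/sys/user/).  Returns false if the
 * registration fails.
 */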
bool setup_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	setup_sysctl_set(&ns->set, &set_root, set_is_seen);
	tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
	if (tbl) {
		int i;
		for (i = 0; i < UCOUNT_COUNTS; i++) {
			tbl[i].data = &ns->ucount_max[i];
		}
		ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl);
	}
	if (!ns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&ns->set);
		return false;
	}
#endif
	return true;
}
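
/*
 * Unregister the sysctls installed by setup_userns_sysctls() and free the
 * namespace's private copy of the table.
 */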
void retire_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = ns->sysctls->ctl_table_arg;
	unregister_sysctl_table(ns->sysctls);
	retire_sysctl_set(&ns->set);
	kfree(tbl);
#endif
}
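
/*
 * Look up the ucounts entry for (ns, uid) on the given hash chain.
 * The caller must hold ucounts_lock.
 */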
static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
{
	struct ucounts *ucounts;

	hlist_for_each_entry(ucounts, hashent, node) {
		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
			return ucounts;
	}
	return NULL;
}
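
/*
 * Find the ucounts entry for (ns, uid), allocating a new one if needed,
 * and take a reference on it.  Returns NULL if the allocation fails or
 * the reference count would overflow INT_MAX.
 */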
static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
	struct ucounts *ucounts, *new;

	spin_lock_irq(&ucounts_lock);
	ucounts = find_ucounts(ns, uid, hashent);
	if (!ucounts) {
		spin_unlock_irq(&ucounts_lock);

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		new->ns = ns;
		new->uid = uid;
		new->count = 0;

		spin_lock_irq(&ucounts_lock);
		ucounts = find_ucounts(ns, uid, hashent);
		if (ucounts) {
			kfree(new);
		} else {
			hlist_add_head(&new->node, hashent);
			ucounts = new;
		}
	}
	if (ucounts->count == INT_MAX)
		ucounts = NULL;
	else
		ucounts->count += 1;
	spin_unlock_irq(&ucounts_lock);
	return ucounts;
}
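
/*
 * Drop a reference taken by get_ucounts().  The entry is unhashed and
 * freed once the last reference is gone.
 */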
static void put_ucounts(struct ucounts *ucounts)
{
	unsigned long flags;

	spin_lock_irqsave(&ucounts_lock, flags);
	ucounts->count -= 1;
	if (!ucounts->count)
		hlist_del_init(&ucounts->node);
	else
		ucounts = NULL;
	spin_unlock_irqrestore(&ucounts_lock, flags);

	kfree(ucounts);
}
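
/*
 * Atomically increment *v, but only while its current value stays below u.
 * Returns true on success, false once the limit has been hit.
 */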
static inline bool atomic_inc_below(atomic_t *v, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c >= u))
			return false;
		old = atomic_cmpxchg(v, c, c+1);
		if (likely(old == c))
			return true;
		c = old;
	}
}
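
/*
 * Charge one object of @type to (@ns, @uid) and to each ucounts entry up
 * the user-namespace hierarchy.  If any level is already at its limit,
 * the increments taken so far are rolled back and NULL is returned.
 */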
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
			   enum ucount_type type)
{
	struct ucounts *ucounts, *iter, *bad;
	struct user_namespace *tns;

	ucounts = get_ucounts(ns, uid);
	for (iter = ucounts; iter; iter = tns->ucounts) {
		int max;
		tns = iter->ns;
		max = READ_ONCE(tns->ucount_max[type]);
		if (!atomic_inc_below(&iter->ucount[type], max))
			goto fail;
	}
	return ucounts;
fail:
	bad = iter;
	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
		atomic_dec(&iter->ucount[type]);

	put_ucounts(ucounts);
	return NULL;
}
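
/*
 * Undo inc_ucount(): uncharge one object of @type at every level and drop
 * the reference on @ucounts.
 */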
void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
{
	struct ucounts *iter;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		int dec = atomic_dec_if_positive(&iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
	}
	put_ucounts(ucounts);
}
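
/*
 * Boot-time setup: register the top-level "user" sysctl directory and the
 * limits of the initial user namespace.
 */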
static __init int user_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	static struct ctl_table_header *user_header;
	static struct ctl_table empty[1];
	/*
	 * It is necessary to register the user directory in the
	 * default set so that registrations in the child sets work
	 * properly.
	 */
	user_header = register_sysctl("user", empty);
	kmemleak_ignore(user_header);
	BUG_ON(!user_header);
	BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
	return 0;
}
subsys_initcall(user_namespace_sysctl_init);
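
/*
 * Illustrative usage sketch (not part of this file): a subsystem enforcing
 * a per-user limit typically brackets object creation and destruction with
 * inc_ucount()/dec_ucount().  UCOUNT_EXAMPLE_OBJECTS and the error code are
 * hypothetical; real callers such as inotify pick their own type and errno.
 *
 *	struct ucounts *ucounts;
 *
 *	ucounts = inc_ucount(current_user_ns(), current_euid(),
 *			     UCOUNT_EXAMPLE_OBJECTS);
 *	if (!ucounts)
 *		return -EMFILE;
 *	...
 *	dec_ucount(ucounts, UCOUNT_EXAMPLE_OBJECTS);
 */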