kernel/bpf/net_namespace.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/net_namespace.h>

/*
 * Functions to manage BPF programs attached to netns
 */

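/* State for a BPF program attached to a network namespace through a
 * bpf_link. Embeds the generic struct bpf_link and is operated on via
 * bpf_netns_link_ops below.
 */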
struct bpf_netns_link {
        struct bpf_link link;
        enum bpf_attach_type type;
        enum netns_bpf_attach_type netns_type;

        /* We don't hold a ref to net in order to auto-detach the link
         * when netns is going away. Instead we rely on pernet
         * pre_exit callback to clear this pointer. Must be accessed
         * with netns_bpf_mutex held.
         */
        struct net *net;
        struct list_head node; /* node in list of links attached to net */
};

/* Protects updates to netns_bpf */
DEFINE_MUTEX(netns_bpf_mutex);

/* Must be called with netns_bpf_mutex held. */
static void netns_bpf_run_array_detach(struct net *net,
                                       enum netns_bpf_attach_type type)
{
        struct bpf_prog_array *run_array;

        run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
                                        lockdep_is_held(&netns_bpf_mutex));
        bpf_prog_array_free(run_array);
}

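/* bpf_link_ops release callback: drop the program from the netns run
 * array and remove the link from the per-netns list, unless the pernet
 * pre_exit callback has already auto-detached it.
 */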
static void bpf_netns_link_release(struct bpf_link *link)
{
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        enum netns_bpf_attach_type type = net_link->netns_type;
        struct net *net;

        mutex_lock(&netns_bpf_mutex);

        /* We can race with cleanup_net, but if we see a non-NULL
         * struct net pointer, pre_exit has not run yet and wait for
         * netns_bpf_mutex.
         */
        net = net_link->net;
        if (!net)
                goto out_unlock;

        netns_bpf_run_array_detach(net, type);
        list_del(&net_link->node);

out_unlock:
        mutex_unlock(&netns_bpf_mutex);
}

static void bpf_netns_link_dealloc(struct bpf_link *link)
{
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);

        kfree(net_link);
}

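/* bpf_link_ops update_prog callback: swap the program behind an
 * attached link (BPF_LINK_UPDATE).
 */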
static int bpf_netns_link_update_prog(struct bpf_link *link,
                                      struct bpf_prog *new_prog,
                                      struct bpf_prog *old_prog)
{
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        enum netns_bpf_attach_type type = net_link->netns_type;
        struct bpf_prog_array *run_array;
        struct net *net;
        int ret = 0;

        if (old_prog && old_prog != link->prog)
                return -EPERM;
        if (new_prog->type != link->prog->type)
                return -EINVAL;

        mutex_lock(&netns_bpf_mutex);

        net = net_link->net;
        if (!net || !check_net(net)) {
                /* Link auto-detached or netns dying */
                ret = -ENOLINK;
                goto out_unlock;
        }

        run_array = rcu_dereference_protected(net->bpf.run_array[type],
                                              lockdep_is_held(&netns_bpf_mutex));
        WRITE_ONCE(run_array->items[0].prog, new_prog);

        old_prog = xchg(&link->prog, new_prog);
        bpf_prog_put(old_prog);

out_unlock:
        mutex_unlock(&netns_bpf_mutex);
        return ret;
}

static int bpf_netns_link_fill_info(const struct bpf_link *link,
                                    struct bpf_link_info *info)
{
        const struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        unsigned int inum = 0;
        struct net *net;

        mutex_lock(&netns_bpf_mutex);
        net = net_link->net;
        if (net && check_net(net))
                inum = net->ns.inum;
        mutex_unlock(&netns_bpf_mutex);

        info->netns.netns_ino = inum;
        info->netns.attach_type = net_link->type;
        return 0;
}

static void bpf_netns_link_show_fdinfo(const struct bpf_link *link,
                                       struct seq_file *seq)
{
        struct bpf_link_info info = {};

        bpf_netns_link_fill_info(link, &info);
        seq_printf(seq,
                   "netns_ino:\t%u\n"
                   "attach_type:\t%u\n",
                   info.netns.netns_ino,
                   info.netns.attach_type);
}

static const struct bpf_link_ops bpf_netns_link_ops = {
        .release = bpf_netns_link_release,
        .dealloc = bpf_netns_link_dealloc,
        .update_prog = bpf_netns_link_update_prog,
        .fill_link_info = bpf_netns_link_fill_info,
        .show_fdinfo = bpf_netns_link_show_fdinfo,
};

/* Must be called with netns_bpf_mutex held. */
static int __netns_bpf_prog_query(const union bpf_attr *attr,
                                  union bpf_attr __user *uattr,
                                  struct net *net,
                                  enum netns_bpf_attach_type type)
{
        __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
        struct bpf_prog_array *run_array;
        u32 prog_cnt = 0, flags = 0;

        run_array = rcu_dereference_protected(net->bpf.run_array[type],
                                              lockdep_is_held(&netns_bpf_mutex));
        if (run_array)
                prog_cnt = bpf_prog_array_length(run_array);

        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
                return -EFAULT;
        if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
                return -EFAULT;
        if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
                return 0;

        return bpf_prog_array_copy_to_user(run_array, prog_ids,
                                           attr->query.prog_cnt);
}

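/* Handles BPF_PROG_QUERY for netns attach types; query.target_fd
 * identifies the network namespace to query.
 */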
int netns_bpf_prog_query(const union bpf_attr *attr,
                         union bpf_attr __user *uattr)
{
        enum netns_bpf_attach_type type;
        struct net *net;
        int ret;

        if (attr->query.query_flags)
                return -EINVAL;

        type = to_netns_bpf_attach_type(attr->query.attach_type);
        if (type < 0)
                return -EINVAL;

        net = get_net_ns_by_fd(attr->query.target_fd);
        if (IS_ERR(net))
                return PTR_ERR(net);

        mutex_lock(&netns_bpf_mutex);
        ret = __netns_bpf_prog_query(attr, uattr, net, type);
        mutex_unlock(&netns_bpf_mutex);

        put_net(net);
        return ret;
}

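/* Handles BPF_PROG_ATTACH for netns attach types (currently only the
 * flow dissector); attaches @prog to the caller's network namespace.
 */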
int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_prog_array *run_array;
        enum netns_bpf_attach_type type;
        struct bpf_prog *attached;
        struct net *net;
        int ret;

        if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
                return -EINVAL;

        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;

        net = current->nsproxy->net_ns;
        mutex_lock(&netns_bpf_mutex);

        /* Attaching prog directly is not compatible with links */
        if (!list_empty(&net->bpf.links[type])) {
                ret = -EEXIST;
                goto out_unlock;
        }

        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
                ret = flow_dissector_bpf_prog_attach_check(net, prog);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        if (ret)
                goto out_unlock;

        attached = net->bpf.progs[type];
        if (attached == prog) {
                /* The same program cannot be attached twice */
                ret = -EINVAL;
                goto out_unlock;
        }

        run_array = rcu_dereference_protected(net->bpf.run_array[type],
                                              lockdep_is_held(&netns_bpf_mutex));
        if (run_array) {
                WRITE_ONCE(run_array->items[0].prog, prog);
        } else {
                run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
                if (!run_array) {
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                run_array->items[0].prog = prog;
                rcu_assign_pointer(net->bpf.run_array[type], run_array);
        }

        net->bpf.progs[type] = prog;
        if (attached)
                bpf_prog_put(attached);

out_unlock:
        mutex_unlock(&netns_bpf_mutex);

        return ret;
}

/* Must be called with netns_bpf_mutex held. */
static int __netns_bpf_prog_detach(struct net *net,
                                   enum netns_bpf_attach_type type,
                                   struct bpf_prog *old)
{
        struct bpf_prog *attached;

        /* Progs attached via links cannot be detached */
        if (!list_empty(&net->bpf.links[type]))
                return -EINVAL;

        attached = net->bpf.progs[type];
        if (!attached || attached != old)
                return -ENOENT;
        netns_bpf_run_array_detach(net, type);
        net->bpf.progs[type] = NULL;
        bpf_prog_put(attached);
        return 0;
}

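/* Handles BPF_PROG_DETACH for netns attach types; detaches the program
 * referenced by attach_bpf_fd from the caller's network namespace.
 */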
int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
        enum netns_bpf_attach_type type;
        struct bpf_prog *prog;
        int ret;

        if (attr->target_fd)
                return -EINVAL;

        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;

        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        mutex_lock(&netns_bpf_mutex);
        ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
        mutex_unlock(&netns_bpf_mutex);

        bpf_prog_put(prog);

        return ret;
}

static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
                                 enum netns_bpf_attach_type type)
{
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        struct bpf_prog_array *run_array;
        int err;

        mutex_lock(&netns_bpf_mutex);

        /* Allow attaching only one prog or link for now */
        if (!list_empty(&net->bpf.links[type])) {
                err = -E2BIG;
                goto out_unlock;
        }
        /* Links are not compatible with attaching prog directly */
        if (net->bpf.progs[type]) {
                err = -EEXIST;
                goto out_unlock;
        }

        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
                err = flow_dissector_bpf_prog_attach_check(net, link->prog);
                break;
        default:
                err = -EINVAL;
                break;
        }
        if (err)
                goto out_unlock;

        run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
        if (!run_array) {
                err = -ENOMEM;
                goto out_unlock;
        }
        run_array->items[0].prog = link->prog;
        rcu_assign_pointer(net->bpf.run_array[type], run_array);

        list_add_tail(&net_link->node, &net->bpf.links[type]);

out_unlock:
        mutex_unlock(&netns_bpf_mutex);
        return err;
}

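/* Handles BPF_LINK_CREATE for BPF_LINK_TYPE_NETNS links; target_fd
 * selects the network namespace the link attaches to.
 */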
int netns_bpf_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
        enum netns_bpf_attach_type netns_type;
        struct bpf_link_primer link_primer;
        struct bpf_netns_link *net_link;
        enum bpf_attach_type type;
        struct net *net;
        int err;

        if (attr->link_create.flags)
                return -EINVAL;

        type = attr->link_create.attach_type;
        netns_type = to_netns_bpf_attach_type(type);
        if (netns_type < 0)
                return -EINVAL;

        net = get_net_ns_by_fd(attr->link_create.target_fd);
        if (IS_ERR(net))
                return PTR_ERR(net);

        net_link = kzalloc(sizeof(*net_link), GFP_USER);
        if (!net_link) {
                err = -ENOMEM;
                goto out_put_net;
        }
        bpf_link_init(&net_link->link, BPF_LINK_TYPE_NETNS,
                      &bpf_netns_link_ops, prog);
        net_link->net = net;
        net_link->type = type;
        net_link->netns_type = netns_type;

        err = bpf_link_prime(&net_link->link, &link_primer);
        if (err) {
                kfree(net_link);
                goto out_put_net;
        }

        err = netns_bpf_link_attach(net, &net_link->link, netns_type);
        if (err) {
                bpf_link_cleanup(&link_primer);
                goto out_put_net;
        }

        put_net(net);
        return bpf_link_settle(&link_primer);

out_put_net:
        put_net(net);
        return err;
}

static int __net_init netns_bpf_pernet_init(struct net *net)
{
        int type;

        for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
                INIT_LIST_HEAD(&net->bpf.links[type]);

        return 0;
}

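/* Pernet pre_exit callback: on netns teardown, free the run arrays,
 * auto-detach any attached links and release directly-attached programs.
 */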
static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
{
        enum netns_bpf_attach_type type;
        struct bpf_netns_link *net_link;

        mutex_lock(&netns_bpf_mutex);
        for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
                netns_bpf_run_array_detach(net, type);
                list_for_each_entry(net_link, &net->bpf.links[type], node)
                        net_link->net = NULL; /* auto-detach link */
                if (net->bpf.progs[type])
                        bpf_prog_put(net->bpf.progs[type]);
        }
        mutex_unlock(&netns_bpf_mutex);
}

static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
        .init = netns_bpf_pernet_init,
        .pre_exit = netns_bpf_pernet_pre_exit,
};

static int __init netns_bpf_init(void)
{
        return register_pernet_subsys(&netns_bpf_pernet_ops);
}

subsys_initcall(netns_bpf_init);