kernel/bpf/cgroup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
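
/* Kill the cgroup's bpf percpu refcount so that, once all usage counts
 * drop, cgroup_bpf_release_fn() is invoked. The cgroup reference taken
 * here is dropped in cgroup_bpf_release().
 */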
void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}
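
/* Helpers that apply one operation to every per-type storage slot
 * (one slot per bpf_cgroup_storage_type) of a bpf_prog_list entry.
 */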
static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_prog *prog)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			storages[stype] = NULL;
			bpf_cgroup_storages_free(storages);
			return -ENOMEM;
		}
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_unlink(storages[stype]);
}
/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}
/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			bpf_cgroup_storages_unlink(pl->storage);
			bpf_cgroup_storages_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}
/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}
/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}
/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}
/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
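
/* Publish @old_array as the new effective array for @type. Note that
 * @old_array carries the new array on entry; after rcu_replace_pointer()
 * it holds the previous array, which is freed after an RCU grace period.
 */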
static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}
/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
	/* has to use macro instead of const int, since compiler thinks
	 * that array below is variable length
	 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}
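
/* Recompute effective arrays for @cgrp and every live descendant in two
 * passes: allocate all new arrays first, then activate them only if every
 * allocation succeeded, so a partial failure leaves the old state intact.
 */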
static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}
#define BPF_CGROUP_MAX_PROGS 64
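
/* Find the bpf_prog_list entry that an attach should replace. Returns the
 * entry to replace, NULL if a new entry should be appended instead, or an
 * ERR_PTR() if the request is invalid.
 */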
static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}
/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
		bpf_cgroup_storages_unlink(pl->storage);
		bpf_cgroup_storages_assign(old_storage, pl->storage);
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	bpf_cgroup_storages_free(old_storage);
	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(pl->storage);
	bpf_cgroup_storages_assign(pl->storage, old_storage);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}
/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: A new program to attach in place of the link's current one
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}
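
/* bpf_link_ops->update_prog callback: re-validate under cgroup_mutex that
 * the link is still attached (the cgroup may have died and auto-released
 * it) and that @old_prog, if given, still matches, then swap in @new_prog.
 */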
static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
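
/* Find the bpf_prog_list entry that a detach request refers to. In legacy
 * (non-multi) mode the first entry is detached, possibly with prog == NULL;
 * in multi mode an exact prog or link match is required.
 */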
static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}
/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	bpf_cgroup_storages_unlink(pl->storage);
	bpf_cgroup_storages_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}
/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
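
/* Entry point for the BPF_PROG_ATTACH bpf(2) command: resolve the target
 * cgroup (and, with BPF_F_REPLACE, the program to be replaced) from the
 * FDs in @attr, then attach @prog.
 */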
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}
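
/* Illustrative userspace sketch (not part of this file) of the legacy
 * attach path handled above, assuming prog_fd and cgroup_fd were obtained
 * elsewhere:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */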
int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}
static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}
static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}
static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}
static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}
static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS	(0)	- continue with packet output
 *   NET_XMIT_DROP	(1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN	(2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
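
/* Run BPF_CGROUP_DEVICE programs for the current task's cgroup; returns 0
 * if access to the device is allowed and a nonzero value otherwise.
 */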
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}
static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}
const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto = cgroup_dev_func_proto,
	.is_valid_access = cgroup_dev_is_valid_access,
};
/**
 * __cgroup_bpf_run_filter_sysctl() - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
#ifdef CONFIG_NET
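/* Opportunistic, lockless check used by the sockopt hooks to skip buffer
 * allocation and socket locking when no program is attached.
 */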
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}
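
/* Allocate the kernel buffer that is exposed to the program as ctx->optval,
 * clamped to PAGE_SIZE; returns the clamped length or a negative errno.
 */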
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}
int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			*kernel_optval = ctx.optval;
		}
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ctx.optlen = max_optlen;

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */
		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (copy_from_user(ctx.optval, optval,
				   min(ctx.optlen, max_optlen)) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
		    put_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
#endif
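
/* Recursively copy the names of @dir and all its parents into *bufp,
 * root first, each component followed by a '/'; advances *bufp and
 * decrements *lenp, returning the total length written or a negative errno.
 */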
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}
BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
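
/* Copy a sysctl value into a program-supplied buffer, always
 * NUL-terminating the destination. Returns the number of bytes copied,
 * -E2BIG if the value was truncated, or -EINVAL if there is no value.
 */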
static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}
BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}
static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}
static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}
const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto = sysctl_func_proto,
	.is_valid_access = sysctl_is_valid_access,
	.convert_ctx_access = sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};
static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}
static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}
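
/* Emit a single load (BPF_LDX_MEM) or store (BPF_STX_MEM) instruction T
 * for field F, rewriting a bpf_sockopt access into the corresponding
 * bpf_sockopt_kern offset.
 */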
#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))
static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}
static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto = cg_sockopt_func_proto,
	.is_valid_access = cg_sockopt_is_valid_access,
	.convert_ctx_access = cg_sockopt_convert_ctx_access,
	.gen_prologue = cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};