// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

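/* The static key above is bumped once per attached program (see
 * __cgroup_bpf_attach() below) and dropped again on detach and on cgroup
 * release, so the cgroup_bpf_enabled checks in the per-hook wrappers in
 * include/linux/bpf-cgroup.h stay a static-branch no-op until at least one
 * cgroup BPF program has ever been attached.
 */
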
void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

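/* An illustrative sketch of the rules above, assuming cgroups /A and /A/B:
 * - A's program was attached with no flags (non-overridable): attaching to
 *   B is rejected with -EPERM.
 * - A's program was attached with BPF_F_ALLOW_OVERRIDE: B may attach its
 *   own program, which then takes precedence over A's for B's tasks.
 * - A's program was attached with BPF_F_ALLOW_MULTI: B may attach several
 *   programs (up to BPF_CGROUP_MAX_PROGS) and all of them are run.
 */
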
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

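/* The flat array built above is what the runtime hooks actually walk: it
 * is filled starting from this cgroup and then each ancestor in turn, and
 * a cgroup only contributes its list when the array is still empty at that
 * point or when its programs were attached with BPF_F_ALLOW_MULTI.
 */
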
static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

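/* Note the refcounting set up above: a new cgroup takes a cgroup_bpf_get()
 * reference on every ancestor, and cgroup_bpf_release() drops them again,
 * so an ancestor's bpf state (and the programs shared through the effective
 * arrays) cannot go away while a descendant may still be using it.
 */
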
static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

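/* update_effective_progs() is deliberately two-phase: effective arrays for
 * all descendants are allocated first and only activated once every
 * allocation has succeeded, so an -ENOMEM in the middle of the walk never
 * leaves the hierarchy with a partially updated set of arrays.
 */
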
#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_prog *replace_prog,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	struct bpf_prog_list *pl, *replace_pl = NULL;
	enum bpf_cgroup_storage_type stype;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog)
				/* disallow attaching the same prog twice */
				return -EINVAL;
			if (pl->prog == replace_prog)
				replace_pl = pl;
		}
		if ((flags & BPF_F_REPLACE) && !replace_pl)
			/* prog to replace not found for cgroup */
			return -ENOENT;
	} else if (!list_empty(progs)) {
		replace_pl = list_first_entry(progs, typeof(*pl), node);
	}

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (replace_pl) {
		pl = replace_pl;
		old_prog = pl->prog;
		for_each_cgroup_storage_type(stype) {
			old_storage[stype] = pl->storage[stype];
			bpf_cgroup_storage_unlink(old_storage[stype]);
		}
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	for_each_cgroup_storage_type(stype)
		pl->storage[stype] = storage[stype];

	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (!replace_pl) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

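/* A minimal sketch of how userspace reaches __cgroup_bpf_attach(), via
 * libbpf's bpf_prog_attach() wrapper around bpf(BPF_PROG_ATTACH, ...).
 * The paths and descriptors below are hypothetical placeholders:
 *
 *	int cg_fd = open("/sys/fs/cgroup/unified/app", O_RDONLY);
 *	// prog_fd: fd of a loaded BPF_PROG_TYPE_CGROUP_SKB program
 *	int err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
 *				  BPF_F_ALLOW_MULTI);
 */
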
/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, attr->attach_type,
				attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS (0)	- continue with packet output
 *   NET_XMIT_DROP (1)		- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN (2)		- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

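/* Egress is special-cased above because a drop must be distinguishable from
 * congestion notification: BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY maps the
 * programs' return codes onto the NET_XMIT_* outcomes documented in the
 * comment preceding this function, while every other attach type only needs
 * the binary allow (1) / deny (!= 1) convention.
 */
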
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);

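/* Note the inverted return convention here: 0 means the device access is
 * allowed, non-zero means at least one program rejected it (the device
 * cgroup caller is expected to turn that into -EPERM).
 */
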
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);

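/* A minimal sketch of a program for this hook (BPF_PROG_TYPE_CGROUP_SYSCTL,
 * attached with BPF_CGROUP_SYSCTL); the program name is illustrative only.
 * It denies all sysctl writes and allows reads:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// 0 = deny, 1 = allow
 *	}
 */
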
#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt);

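/* Ownership note: on ret == 0 the caller runs the kernel setsockopt handler
 * with *kernel_optval and is expected to kfree() it afterwards, which is why
 * the out: path above only frees the buffer when the program bypassed or
 * rejected the call.
 */
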
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt);
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

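/* Return contract shared by the value getters below: the number of bytes
 * copied on success (with zero padding when dst is larger than src), -E2BIG
 * when the value had to be truncated (dst is still NUL-terminated), and
 * -EINVAL when there is no source value (dst is zero-filled so the program
 * never sees stale bytes).
 */
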
BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

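/* optval and optval_end are deliberately exposed as PTR_TO_PACKET /
 * PTR_TO_PACKET_END above: the verifier then enforces the same
 * compare-before-access bounds discipline that skb programs use for
 * data/data_end, so a sockopt program can only touch memory inside the
 * kzalloc'ed option buffer.
 */
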
#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

1547 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
1548 bool direct_write,
1549 const struct bpf_prog *prog)
1551 /* Nothing to do for sockopt argument. The data is kzalloc'ated.
1553 return 0;
1556 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
1557 .get_func_proto = cg_sockopt_func_proto,
1558 .is_valid_access = cg_sockopt_is_valid_access,
1559 .convert_ctx_access = cg_sockopt_convert_ctx_access,
1560 .gen_prologue = cg_sockopt_get_prologue,
1563 const struct bpf_prog_ops cg_sockopt_prog_ops = {