// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @type: Type of attach operation
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}

static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			*kernel_optval = ctx.optval;
		}
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ctx.optlen = max_optlen;

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (copy_from_user(ctx.optval, optval,
				   min(ctx.optlen, max_optlen)) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
		    put_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;

	/* Avoid leading slash. */
	if (!ret)
		return tmp_ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}

	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};