/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License.  See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}
/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}
/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;

	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
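/* Example (illustrative): for a hierarchy A -> B, a program attached to A
 * with no flags blocks any attach to B; had A used BPF_F_ALLOW_OVERRIDE or
 * BPF_F_ALLOW_MULTI, attaching to B would be permitted.
 */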
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	struct bpf_prog_array __rcu *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			list_for_each_entry(pl,
					    &p->bpf.progs[type], node) {
				if (!pl->prog)
					continue;
				rcu_dereference_protected(progs, 1)->
					progs[cnt++] = pl->prog;
			}
		p = cgroup_parent(p);
	} while (p);

	*array = progs;
	return 0;
}
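/* Example (illustrative): for a hierarchy A -> B -> C where each cgroup
 * attached one program with BPF_F_ALLOW_MULTI, C's effective array is
 * ordered [C's prog, B's prog, A's prog]. Had the parents attached theirs
 * with BPF_F_ALLOW_OVERRIDE instead, the array would be just [C's prog].
 */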
static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}
/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}
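/* Note (illustrative): cgroup_bpf_inherit() is invoked from the cgroup core
 * when a new cgroup is created (cgroup_mkdir()), so a fresh child starts
 * out with its ancestors' effective program arrays already populated.
 */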
#define BPF_CGROUP_MAX_PROGS 64
/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node)
			if (pl->prog == prog)
				/* disallow attaching the same prog twice */
				return -EINVAL;

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl)
			return -ENOMEM;
		pl_was_allocated = true;
		pl->prog = prog;
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl)
				return -ENOMEM;
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			pl_was_allocated = false;
		}
		pl->prog = prog;
	}

	cgrp->bpf.flags[type] = flags;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	static_branch_inc(&cgroup_bpf_enabled_key);
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* and cleanup the prog list */
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	} else {
		/* restore back prog */
		pl->prog = old_prog;
	}
	return err;
}
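/* Example (illustrative sketch, not part of the kernel build): user space
 * reaches __cgroup_bpf_attach() through the bpf(2) syscall with the
 * BPF_PROG_ATTACH command; cgroup_fd and prog_fd are hypothetical
 * descriptors, and <linux/bpf.h> plus <sys/syscall.h> are assumed:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;	// open()ed cgroup v2 directory
 *	attr.attach_bpf_fd = prog_fd;	// obtained from BPF_PROG_LOAD
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */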
/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 unused_flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}
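/* Example (illustrative sketch): the matching user-space detach uses the
 * BPF_PROG_DETACH command. Under BPF_F_ALLOW_MULTI the prog fd must
 * identify the program to remove; NONE and OVERRIDE cgroups accept
 * attach_bpf_fd left at zero (prog == NULL here). cgroup_fd and prog_fd
 * are hypothetical descriptors:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;	// required only for ALLOW_MULTI
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */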
/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
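/* Example (illustrative sketch): querying attached program ids from user
 * space with the BPF_PROG_QUERY command; cgroup_fd is a hypothetical
 * descriptor and 64 matches the kernel-internal attach limit:
 *
 *	__u32 ids[64] = {};
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd	= cgroup_fd;
 *	attr.query.attach_type	= BPF_CGROUP_INET_EGRESS;
 *	attr.query.prog_cnt	= 64;
 *	attr.query.prog_ids	= (__u64)(unsigned long)ids;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *	// on return, attr.query.prog_cnt holds the number of programs
 */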
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 bpf_prog_run_save_cb);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
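/* Example (illustrative): a minimal BPF_PROG_TYPE_CGROUP_SKB program that
 * an effective array here might run; returning 1 allows the packet, while
 * returning 0 makes this function report -EPERM:
 *
 *	SEC("cgroup/skb")
 *	int drop_all(struct __sk_buff *skb)
 *	{
 *		return 0;	// reject all traffic for this cgroup
 *	}
 */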
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
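/* Note (illustrative): this helper is reached e.g. for
 * BPF_CGROUP_INET_SOCK_CREATE from inet_create() via the
 * BPF_CGROUP_RUN_PROG_INET_SOCK() wrapper, letting a cgroup program veto
 * socket creation for tasks in that cgroup.
 */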
/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
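/* Note (illustrative): BPF_CGROUP_SOCK_OPS programs reach this helper from
 * the TCP stack through the BPF_CGROUP_RUN_PROG_SOCK_OPS() wrapper and can
 * tune per-connection parameters such as the initial RTO or window.
 */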
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
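/* Example (illustrative): a minimal BPF_PROG_TYPE_CGROUP_DEVICE program.
 * A return of 1 allows the device access; returning 0 makes
 * __cgroup_bpf_check_dev_permission() report a denial to its caller:
 *
 *	SEC("cgroup/dev")
 *	int deny_all(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		return 0;	// deny every device access in this cgroup
 *	}
 */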
static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}
static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}
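/* Example (illustrative): a 4-byte read of ctx->major at
 * offsetof(struct bpf_cgroup_dev_ctx, major) passes the checks above;
 * any write, or a 1- or 2-byte read, is rejected.
 */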
const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};