/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License.  See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}
/* Count the number of elements in the list.
 * It's slow, but the list cannot be long.
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}
/* If a parent has a non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * If a parent has an overridable or multi-prog, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's F_ALLOW_OVERRIDE program yields to
 * programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	rcu_assign_pointer(*array, progs);
	return 0;
}
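
/* Worked example for the walk above (illustrative): with a hierarchy
 * root -> parent -> child, a program attached at each level for the same
 * attach type, and compute_effective_progs() called for child, the array
 * is built starting at child and walking toward root, so child's own
 * program lands at index 0 and runs first:
 *
 *	- parent attached with BPF_F_ALLOW_OVERRIDE: parent's program is
 *	  skipped (cnt is already non-zero and MULTI is not set), so only
 *	  child's program becomes effective and the parent's yields;
 *	- parent attached with BPF_F_ALLOW_MULTI: parent's program is
 *	  appended after child's, so both run, closest cgroup first.
 */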
static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}
/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}
static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}
#define BPF_CGROUP_MAX_PROGS 64
/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	/* initialize both arrays to NULL so that a partial allocation
	 * failure below only frees what was actually allocated
	 */
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL},
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none.
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			for_each_cgroup_storage_type(stype) {
				old_storage[stype] = pl->storage[stype];
				bpf_cgroup_storage_unlink(old_storage[stype]);
			}
			pl_was_allocated = false;
		}
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
	}

	cgrp->bpf.flags[type] = flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
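
/* Illustrative userspace counterpart of the attach path above -- a sketch
 * rather than kernel code, assuming <linux/bpf.h>, <sys/syscall.h> and
 * <unistd.h> are included, prog_fd is a loaded cgroup program and
 * cgroup_fd is an open cgroup directory:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * With BPF_F_ALLOW_MULTI the program is appended (list_add_tail() above),
 * bounded by BPF_CGROUP_MAX_PROGS per attach type; with no flags a second
 * attach to the same cgroup replaces the existing program instead.
 */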
/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 unused_flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}
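
/* Illustrative userspace counterpart of the detach path above (a sketch,
 * not kernel code). For a BPF_F_ALLOW_MULTI cgroup, attach_bpf_fd must be
 * the fd of the exact program to remove; for NONE/OVERRIDE cgroups it may
 * be omitted and the single attached program is detached:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */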
/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
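
/* Illustrative userspace counterpart of the query path above (a sketch,
 * not kernel code). prog_cnt is updated with the total number of programs
 * even when the supplied buffer is too small; in that case as many ids as
 * fit are copied and -ENOSPC is returned:
 *
 *	union bpf_attr attr = {};
 *	__u32 ids[64];
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt    = 64;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */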
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}
int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 bpf_prog_run_save_cb);
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
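
/* Return-value convention as seen from the BPF side -- an illustrative
 * program for this hook, compiled separately with clang -target bpf; the
 * function name is an example, not kernel code:
 *
 *	SEC("cgroup_skb/ingress")
 *	int ingress_filter(struct __sk_buff *skb)
 *	{
 *		if (skb->protocol != bpf_htons(ETH_P_IP))
 *			return 0;
 *		return 1;
 *	}
 *
 * A return of 1 maps to 0 (allowed) here; any other value maps to -EPERM.
 */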
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoints (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
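
/* Illustrative BPF_CGROUP_DEVICE program matching the ctx built above
 * (compiled separately; a sketch, not kernel code). access_type packs the
 * access mode in the upper 16 bits and the device type in the lower ones.
 * This example allows access only to the 1:3 (/dev/null) device:
 *
 *	SEC("cgroup/dev")
 *	int allow_dev_null(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		return ctx->major == 1 && ctx->minor == 3;
 *	}
 *
 * A return of 1 allows the access; 0 makes the caller report a denial.
 */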
static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}
static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}
const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};