// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
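
/*
 * Worked example of the rule above (illustrative): with hierarchy
 * A -> A/B and a program attached to A with flags == 0 (non-overridable),
 * attaching a program of the same type to A/B fails with -EPERM.  Had
 * A's program been attached with BPF_F_ALLOW_OVERRIDE or
 * BPF_F_ALLOW_MULTI, the attach to A/B would be allowed.
 */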

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
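
/*
 * Illustrative result (assuming cgroups A -> A/B, both attached with
 * BPF_F_ALLOW_MULTI): the effective array computed for A/B holds A/B's
 * own programs first, then A's; BPF_PROG_RUN_ARRAY() walks the items in
 * index order, so a cgroup's own programs run before inherited ones.
 */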

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might still be walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_prog *replace_prog,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	struct bpf_prog_list *pl, *replace_pl = NULL;
	enum bpf_cgroup_storage_type stype;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog)
				/* disallow attaching the same prog twice */
				return -EINVAL;
			if (pl->prog == replace_prog)
				replace_pl = pl;
		}
		if ((flags & BPF_F_REPLACE) && !replace_pl)
			/* prog to replace not found for cgroup */
			return -ENOENT;
	} else if (!list_empty(progs)) {
		replace_pl = list_first_entry(progs, typeof(*pl), node);
	}

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (replace_pl) {
		pl = replace_pl;
		old_prog = pl->prog;
		for_each_cgroup_storage_type(stype) {
			old_storage[stype] = pl->storage[stype];
			bpf_cgroup_storage_unlink(old_storage[stype]);
		}
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	for_each_cgroup_storage_type(stype)
		pl->storage[stype] = storage[stype];

	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (!replace_pl) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
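
/*
 * Summary of the attach flag combinations accepted above (illustrative,
 * derived from the checks in __cgroup_bpf_attach()):
 *
 *	0                     - single program; a later attach replaces it
 *	BPF_F_ALLOW_OVERRIDE  - single program, descendants may override it
 *	BPF_F_ALLOW_MULTI     - append to the list of programs
 *	BPF_F_ALLOW_MULTI |
 *	BPF_F_REPLACE         - replace @replace_prog in place
 *
 * BPF_F_ALLOW_OVERRIDE combined with BPF_F_ALLOW_MULTI, or BPF_F_REPLACE
 * without BPF_F_ALLOW_MULTI, is rejected with -EINVAL.
 */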

/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, attr->attach_type,
				attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
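
/*
 * Illustrative user-space counterpart to the three wrappers above (a
 * sketch using libbpf's syscall wrappers; prog_fd and cgroup_fd are
 * assumed to exist and error handling is omitted):
 *
 *	__u32 prog_ids[64], cnt = 64, attach_flags;
 *
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
 *			BPF_F_ALLOW_MULTI);
 *	bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0,
 *		       &attach_flags, prog_ids, &cnt);
 *	bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS);
 */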

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM		- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
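
/*
 * Illustrative program for this hook (a sketch; SEC() is a libbpf
 * convention, and the object is built separately with clang -target bpf).
 * Returning 1 lets the packet pass; returning 0 is reported as -EPERM on
 * ingress and drops the packet on egress:
 *
 *	SEC("cgroup_skb/egress")
 *	int allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;
 *	}
 */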

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr struct provided by
 *                                       user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
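
/*
 * Caller-side note (illustrative): the inet bind()/connect()/sendmsg()
 * paths funnel into this function, so e.g. a BPF_CGROUP_INET4_BIND
 * program (SEC("cgroup/bind4") in libbpf terms) inspects the sockaddr
 * through its context and returns 1 to permit the bind; a return
 * value != 1 surfaces here as -EPERM.
 */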

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
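
/*
 * Example of the access_type encoding above (illustrative): an attempt
 * to write to a character device reaches the program with
 * ctx.access_type == (BPF_DEVCG_ACC_WRITE << 16) | BPF_DEVCG_DEV_CHAR,
 * while ctx.major and ctx.minor identify the device node itself.
 */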

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);
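
/*
 * Illustrative program for this hook (a sketch; SEC() is a libbpf
 * convention). It makes every sysctl visible to the cgroup read-only;
 * the 0 return is reported to the caller as -EPERM per the contract
 * documented above:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_read_only(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;
 *	}
 */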

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt);
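
/*
 * Worked example of the ctx.optlen contract above (illustrative): a
 * program that rewrites TCP_CONGESTION("nv") into TCP_CONGESTION("cubic")
 * writes the new name into optval and updates ctx.optlen to the new
 * length, so the kernel handler sees the replacement value; setting
 * ctx.optlen = -1 instead skips the kernel setsockopt handler entirely.
 */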

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */
		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt);
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}
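
/*
 * Worked example of the contract above (illustrative): copying
 * src = "512\n" (src_len = 4) into dst_len = 8 copies the four bytes,
 * zero-fills the remainder and returns 4; with dst_len = 2 only "5"
 * survives ("51" is copied, then truncated to "5\0") and -E2BIG is
 * returned.
 */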

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}

	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}

	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))
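
/*
 * Example expansion (mechanical, for reference):
 * CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk) becomes
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, sk),
 *		    si->dst_reg, si->src_reg,
 *		    offsetof(struct bpf_sockopt_kern, sk))
 *
 * i.e. a load of the kernel-side context field into dst_reg.
 */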

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};