/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
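
/*
 * The per-CPU bpf_cgroup_storage[] slots declared above are what
 * bpf_cgroup_storage_set() (further down) fills in right before a cgroup
 * BPF program runs, so the cgroup storage helpers can find their buffers.
 */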
struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};
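
/*
 * One bpf_cgroup_storage instance (above) backs a single (cgroup, attach
 * type) entry of a cgroup storage map: shared maps use @buf, per-CPU maps
 * use @percpu_buf; cgroup_storage_type() below tells the two apart.
 */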
struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;
struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and their attach flags;
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element;
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};
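
/*
 * effective[] above is the flattened, RCU-protected program array that is
 * actually run at each attach point; it is recomputed from the progs[]
 * lists of this cgroup and its ancestors whenever programs are attached or
 * detached (see __cgroup_bpf_attach()/__cgroup_bpf_detach()).
 */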
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_prog *replace_prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);
/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      struct bpf_prog *replace_prog, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);
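
/*
 * The __cgroup_bpf_run_filter_*() runners above are normally reached through
 * the BPF_CGROUP_RUN_* wrapper macros below, which test the
 * cgroup_bpf_enabled static key first so kernels with no cgroup BPF programs
 * attached pay essentially nothing at these hooks.
 */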
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
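
/*
 * bpf_cgroup_storage_set() is meant to be called by the prog-array runners
 * (e.g. the BPF_PROG_RUN_ARRAY machinery in linux/bpf.h) with the storage
 * pointers of the program about to execute, so the cgroup storage helpers
 * resolve to that program's buffers.
 */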
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
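
/*
 * Typical call-site pattern (a sketch, not copied from any one caller): the
 * macro evaluates to 0 when no program is attached or the program accepts
 * the packet, and to a negative errno such as -EPERM when it rejects it:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *	if (err)
 *		return err;
 */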
#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,    \
						       buf, count, pos, nbuf, \
						       BPF_CGROUP_SYSCTL);    \
	__ret;								       \
})
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,  \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,       \
							   optname, optval,   \
							   optlen,	       \
							   kernel_optval);    \
	__ret;								       \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,  \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level,       \
							   optname, optval,   \
							   optlen, max_optlen,\
							   retval);	       \
	__ret;								       \
})
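
/*
 * Note: BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN() above reads the user-supplied
 * optlen so the getsockopt hook can size its temporary buffer; with cgroup
 * BPF disabled at run time it simply evaluates to 0.
 */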
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags) {
	return 0;
}
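
/*
 * With CONFIG_CGROUP_BPF disabled, the hooks below compile down to their
 * pass-through values (0, or the caller-supplied retval for getsockopt).
 */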
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })
#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */