// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}
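
/* Illustrative sketch (not part of libbpf): the wrappers above are the only
 * layer over the raw bpf(2) syscall, so an equivalent call can be issued
 * directly. A standalone program, with no libbpf involvement, might do
 * (error handling elided; raw_array_map_create() is a hypothetical name):
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int raw_array_map_create(void)
 *	{
 *		union bpf_attr attr;
 *
 *		// unused trailing bytes of bpf_attr must be zero
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size = sizeof(__u32);
 *		attr.value_size = sizeof(__u64);
 *		attr.max_entries = 16;
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */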
/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
 *
 *   [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 *   [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;

	/* attempt loading a trivial program that calls the probed helper */
	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
	if (prog_fd >= 0) {
		close(prog_fd);
		return 1;
	}
	return 0;
}
static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;
int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}
int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}
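
/* Usage sketch (illustrative only): on kernels without memcg-based accounting
 * libbpf bumps RLIMIT_MEMLOCK to RLIM_INFINITY on the first map/prog/BTF load.
 * An application that wants a specific cap (or no bumping at all) can set it
 * before the first load; the call fails with -EBUSY once the bump happened:
 *
 *	// cap locked memory for BPF objects at 64 MiB instead of infinity
 *	int err = libbpf_set_memlock_rlim(64UL * 1024 * 1024);
 *
 *	// or pass 0 to disable the automatic bump entirely
 *	// err = libbpf_set_memlock_rlim(0);
 */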
int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
	union bpf_attr attr;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
	attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

	attr.map_token_fd = OPTS_GET(opts, token_fd, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
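
/* Usage sketch (illustrative only): create a small BPF_MAP_TYPE_HASH map with
 * non-preallocated entries. The map name "ports" and the sizes are arbitrary
 * example values:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "ports",
 *				sizeof(__u32), sizeof(__u64), 1024, &opts);
 *	if (map_fd < 0)
 *		// map_fd is -errno here (libbpf 1.0 error convention)
 *		return map_fd;
 */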
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  struct bpf_prog_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (!attempts)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, attr_sz);

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);
	attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);

	if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
	OPTS_SET(opts, log_true_size, attr.log_true_size);
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			/* try with corrected line info records */
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on error
		 * with log_level == 1 and log_buf/log_buf_size set, to get details of
		 * failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
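
/* Usage sketch (illustrative only): load a trivial "return 0" socket filter,
 * capturing the verifier log on failure. The insn macros come from tools'
 * linux/filter.h (as used above); the prog name and buffer size are example
 * values:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	char log[4096];
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		.log_buf = log,
 *		.log_size = sizeof(log),
 *	);
 *	int prog_fd;
 *
 *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "noop", "GPL",
 *				insns, ARRAY_SIZE(insns), &opts);
 *	if (prog_fd < 0)
 *		fprintf(stderr, "verifier said:\n%s\n", log);
 *
 * With log_level left at 0, the log is filled in only on the automatic
 * log_level=1 retry described above.
 */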
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
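
/* Usage sketch (illustrative only): single-element update and lookup against
 * the hypothetical map_fd from the earlier bpf_map_create() sketch:
 *
 *	__u32 key = 443;
 *	__u64 val = 1, out;
 *	int err;
 *
 *	err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	if (!err)
 *		err = bpf_map_lookup_elem(map_fd, &key, &out);
 *	// on success out == 1; on failure err is -errno (e.g. -ENOENT)
 */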
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, next_key);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_freeze(int fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, batch);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, attr_sz);
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}
int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}
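
/* Usage sketch (a sketch under assumptions, not a definitive recipe): drain a
 * map with bpf_map_lookup_batch(). The batch token is opaque to callers;
 * selftests commonly use a __u64 for it. -ENOENT signals that the final
 * (possibly partial) batch was returned. keys[]/vals[] sizing assumes the
 * 4-byte/8-byte map from earlier sketches:
 *
 *	__u64 in_batch, out_batch;
 *	__u32 keys[128];
 *	__u64 vals[128];
 *	__u32 count;
 *	bool first = true;
 *	int err;
 *
 *	do {
 *		count = 128;
 *		err = bpf_map_lookup_batch(map_fd,
 *					   first ? NULL : &in_batch, &out_batch,
 *					   keys, vals, &count, NULL);
 *		// process count elements of keys[]/vals[] here
 *		in_batch = out_batch;
 *		first = false;
 *	} while (!err);
 *	// err == -ENOENT means clean end of map
 */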
int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_obj_pin_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.path_fd = OPTS_GET(opts, path_fd, 0);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_obj_pin(int fd, const char *pathname)
{
	return bpf_obj_pin_opts(fd, pathname, NULL);
}

int bpf_obj_get(const char *pathname)
{
	return bpf_obj_get_opts(pathname, NULL);
}

int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_obj_get_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.path_fd = OPTS_GET(opts, path_fd, 0);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
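
/* Usage sketch (illustrative only): pin an fd into bpffs and re-open it from
 * another process. The path assumes a standard /sys/fs/bpf mount:
 *
 *	int err = bpf_obj_pin(map_fd, "/sys/fs/bpf/ports");
 *
 *	// ... later, possibly from a different process:
 *	int fd = bpf_obj_get("/sys/fs/bpf/ports");
 *	if (fd < 0)
 *		// fd is -errno, e.g. -ENOENT if nothing is pinned there
 *		return fd;
 */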
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}
int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_attach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd = target;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0);
	attr.expected_revision = OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_detach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_detach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd = target;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.expected_revision = OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	return bpf_prog_detach_opts(0, target_fd, type, NULL);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL);
}
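
/* Usage sketch (illustrative only): legacy-style cgroup attach/detach. cg_fd
 * is assumed to be an open cgroup v2 directory fd and prog_fd a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program:
 *
 *	int err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS,
 *				  BPF_F_ALLOW_MULTI);
 *	// ...
 *	err = bpf_prog_detach2(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS);
 *
 * New code generally prefers bpf_link_create() below, which yields an
 * fd-owned attachment instead of a global one.
 */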
int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create);
	__u32 target_btf_id, iter_info_len, relative_id;
	int fd, err, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
	case BPF_TRACE_KPROBE_SESSION:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_UPROBE_MULTI:
	case BPF_TRACE_UPROBE_SESSION:
		attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
		attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
		attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
		attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0));
		attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0));
		attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0));
		attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0);
		if (!OPTS_ZEROED(opts, uprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_RAW_TP:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETFILTER:
		attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0);
		attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0);
		attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0);
		attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0);
		if (!OPTS_ZEROED(opts, netfilter))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TCX_INGRESS:
	case BPF_TCX_EGRESS:
		relative_fd = OPTS_GET(opts, tcx.relative_fd, 0);
		relative_id = OPTS_GET(opts, tcx.relative_id, 0);
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			attr.link_create.tcx.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.tcx.relative_fd = relative_fd;
		}
		attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0);
		if (!OPTS_ZEROED(opts, tcx))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		relative_fd = OPTS_GET(opts, netkit.relative_fd, 0);
		relative_id = OPTS_GET(opts, netkit.relative_id, 0);
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			attr.link_create.netkit.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.netkit.relative_fd = relative_fd;
		}
		attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0);
		if (!OPTS_ZEROED(opts, netkit))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if user used features not supported by
	 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for few select kinds of programs that can be
	 * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}
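
/* Usage sketch (illustrative only): the link-based counterpart of the cgroup
 * attach above. Closing link_fd detaches the program automatically unless the
 * link was pinned:
 *
 *	LIBBPF_OPTS(bpf_link_create_opts, opts);
 *	int link_fd;
 *
 *	link_fd = bpf_link_create(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS, &opts);
 *	if (link_fd < 0)
 *		return link_fd;
 *	// ...
 *	close(link_fd);	// detaches
 */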
int bpf_link_detach(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_update);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	if (OPTS_GET(opts, old_prog_fd, 0))
		attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
	else if (OPTS_GET(opts, old_map_fd, 0))
		attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
int bpf_iter_create(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
int bpf_prog_query_opts(int target, enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, query);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.query.target_fd = target;
	attr.query.attach_type = type;
	attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
	attr.query.count = OPTS_GET(opts, count, 0);
	attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL));
	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
	attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);

	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, revision, attr.query.revision);
	OPTS_SET(opts, count, attr.query.count);

	return libbpf_err_errno(ret);
}
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	return libbpf_err_errno(ret);
}
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, test);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}
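
/* Usage sketch (illustrative only): run a loaded packet-processing program
 * against a canned buffer and inspect verdict and timing. pkt[] here is just
 * a zeroed placeholder payload:
 *
 *	char pkt[64] = {}, out[64];
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.data_out = out,
 *		.data_size_out = sizeof(out),
 *		.repeat = 100,
 *	);
 *
 *	int err = bpf_prog_test_run_opts(prog_fd, &topts);
 *	// on success: topts.retval is the prog's return code,
 *	// topts.duration the average ns per run
 */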
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, attr_sz);
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}
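
/* Usage sketch (illustrative only): walk all loaded programs by id. -ENOENT
 * from the kernel terminates the walk; any other error is real:
 *
 *	__u32 id = 0;
 *	int fd, err;
 *
 *	while (!(err = bpf_prog_get_next_id(id, &id))) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;	// prog may have been unloaded meanwhile
 *		// ... query fd, e.g. via bpf_prog_get_info_by_fd() below
 *		close(fd);
 *	}
 *	// err == -ENOENT means the id list was exhausted
 */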
int bpf_prog_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	return bpf_prog_get_fd_by_id_opts(id, NULL);
}

int bpf_map_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.map_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
	return bpf_map_get_fd_by_id_opts(id, NULL);
}

int bpf_btf_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.btf_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	return bpf_btf_get_fd_by_id_opts(id, NULL);
}

int bpf_link_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
	return bpf_link_get_fd_by_id_opts(id, NULL);
}
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	const size_t attr_sz = offsetofend(union bpf_attr, info);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
	if (!err)
		*info_len = attr.info.info_len;

	return libbpf_err_errno(err);
}

int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
}

int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(map_fd, info, info_len);
}

int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
}

int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}
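
/* Usage sketch (illustrative only): fetch basic metadata about a program fd.
 * info_len is an in/out argument; the kernel reports how much it filled:
 *
 *	struct bpf_prog_info info = {};
 *	__u32 info_len = sizeof(info);
 *	int err;
 *
 *	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
 *	if (!err)
 *		printf("id=%u name=%s\n", info.id, info.name);
 */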
int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_raw_tp_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.prog_fd = prog_fd;
	attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL));
	attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0);

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name);

	return bpf_raw_tracepoint_open_opts(prog_fd, &opts);
}
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;

	attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
	attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);

	/* log_level == 0 and log_buf != NULL means "try loading without
	 * log_buf, but retry with log_buf and log_level=1 on error", which is
	 * consistent across low-level and high-level BTF and program loading
	 * APIs within libbpf and provides a sensible behavior in practice
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}

	OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
	return libbpf_err_errno(fd);
}
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}
int bpf_enable_stats(enum bpf_stats_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, token_create);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_token_create_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.token_create.bpffs_fd = bpffs_fd;
	attr.token_create.flags = OPTS_GET(opts, flags, 0);

	fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}