1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
4 * common eBPF ELF operations.
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation;
13 * version 2.1 of the License (not later!)
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this program; if not, see <http://www.gnu.org/licenses>
28 #include <asm/unistd.h>
30 #include <linux/bpf.h>
33 #include "libbpf_internal.h"
35 /* make sure libbpf doesn't use kernel-only integer typedefs */
36 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
39 * When building perf, unistd.h is overridden. __NR_bpf is
40 * required to be defined explicitly.
43 # if defined(__i386__)
45 # elif defined(__x86_64__)
47 # elif defined(__aarch64__)
49 # elif defined(__sparc__)
51 # elif defined(__s390__)
53 # elif defined(__arc__)
56 # error __NR_bpf not defined. libbpf does not support your arch.
/* Convert a user-space pointer to the __u64 representation the bpf(2)
 * syscall ABI expects in union bpf_attr fields. The intermediate
 * (unsigned long) cast keeps the conversion well-defined on both 32-bit
 * and 64-bit architectures.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
/* Thin wrapper around the raw bpf(2) syscall; returns the syscall's
 * result directly (fd or 0 on success, -1 with errno set on failure).
 */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
/* Issue BPF_PROG_LOAD, retrying while the kernel reports EAGAIN
 * (the verifier can transiently fail under memory pressure).
 */
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
	int fd;

	do {
		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN);

	return fd;
}
82 int bpf_create_map_xattr(const struct bpf_create_map_attr
*create_attr
)
86 memset(&attr
, '\0', sizeof(attr
));
88 attr
.map_type
= create_attr
->map_type
;
89 attr
.key_size
= create_attr
->key_size
;
90 attr
.value_size
= create_attr
->value_size
;
91 attr
.max_entries
= create_attr
->max_entries
;
92 attr
.map_flags
= create_attr
->map_flags
;
93 if (create_attr
->name
)
94 memcpy(attr
.map_name
, create_attr
->name
,
95 min(strlen(create_attr
->name
), BPF_OBJ_NAME_LEN
- 1));
96 attr
.numa_node
= create_attr
->numa_node
;
97 attr
.btf_fd
= create_attr
->btf_fd
;
98 attr
.btf_key_type_id
= create_attr
->btf_key_type_id
;
99 attr
.btf_value_type_id
= create_attr
->btf_value_type_id
;
100 attr
.map_ifindex
= create_attr
->map_ifindex
;
101 if (attr
.map_type
== BPF_MAP_TYPE_STRUCT_OPS
)
102 attr
.btf_vmlinux_value_type_id
=
103 create_attr
->btf_vmlinux_value_type_id
;
105 attr
.inner_map_fd
= create_attr
->inner_map_fd
;
107 return sys_bpf(BPF_MAP_CREATE
, &attr
, sizeof(attr
));
110 int bpf_create_map_node(enum bpf_map_type map_type
, const char *name
,
111 int key_size
, int value_size
, int max_entries
,
112 __u32 map_flags
, int node
)
114 struct bpf_create_map_attr map_attr
= {};
116 map_attr
.name
= name
;
117 map_attr
.map_type
= map_type
;
118 map_attr
.map_flags
= map_flags
;
119 map_attr
.key_size
= key_size
;
120 map_attr
.value_size
= value_size
;
121 map_attr
.max_entries
= max_entries
;
123 map_attr
.numa_node
= node
;
124 map_attr
.map_flags
|= BPF_F_NUMA_NODE
;
127 return bpf_create_map_xattr(&map_attr
);
130 int bpf_create_map(enum bpf_map_type map_type
, int key_size
,
131 int value_size
, int max_entries
, __u32 map_flags
)
133 struct bpf_create_map_attr map_attr
= {};
135 map_attr
.map_type
= map_type
;
136 map_attr
.map_flags
= map_flags
;
137 map_attr
.key_size
= key_size
;
138 map_attr
.value_size
= value_size
;
139 map_attr
.max_entries
= max_entries
;
141 return bpf_create_map_xattr(&map_attr
);
144 int bpf_create_map_name(enum bpf_map_type map_type
, const char *name
,
145 int key_size
, int value_size
, int max_entries
,
148 struct bpf_create_map_attr map_attr
= {};
150 map_attr
.name
= name
;
151 map_attr
.map_type
= map_type
;
152 map_attr
.map_flags
= map_flags
;
153 map_attr
.key_size
= key_size
;
154 map_attr
.value_size
= value_size
;
155 map_attr
.max_entries
= max_entries
;
157 return bpf_create_map_xattr(&map_attr
);
/* Create a map-in-map (e.g. ARRAY_OF_MAPS/HASH_OF_MAPS), optionally
 * NUMA-pinned. inner_map_fd supplies the template for inner map values;
 * value_size is fixed at 4 because map-in-map values are inner map fds.
 */
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (name)
		memcpy(attr.map_name, name,
		       min(strlen(name), BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
/* Map-in-map creation without a NUMA preference (node = -1). */
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
/* Re-pack 'cnt' records of 'expected_rec_size' bytes (what the kernel
 * understands) into a freshly malloc'ed array of records that are
 * 'actual_rec_size' bytes apart, zero-filling the tail of each record.
 * Used to retry prog load when the kernel's record size is smaller than
 * ours. Caller owns (and must free) the returned buffer; returns NULL
 * on allocation failure.
 */
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset((char *)nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord = (const char *)orecord + actual_rec_size;
		nrecord = (char *)nrecord + actual_rec_size;
	}

	return info;
}
220 int bpf_load_program_xattr(const struct bpf_load_program_attr
*load_attr
,
221 char *log_buf
, size_t log_buf_sz
)
223 void *finfo
= NULL
, *linfo
= NULL
;
228 if (!load_attr
|| !log_buf
!= !log_buf_sz
)
231 log_level
= load_attr
->log_level
;
232 if (log_level
> (4 | 2 | 1) || (log_level
&& !log_buf
))
235 memset(&attr
, 0, sizeof(attr
));
236 attr
.prog_type
= load_attr
->prog_type
;
237 attr
.expected_attach_type
= load_attr
->expected_attach_type
;
238 if (attr
.prog_type
== BPF_PROG_TYPE_STRUCT_OPS
||
239 attr
.prog_type
== BPF_PROG_TYPE_LSM
) {
240 attr
.attach_btf_id
= load_attr
->attach_btf_id
;
241 } else if (attr
.prog_type
== BPF_PROG_TYPE_TRACING
||
242 attr
.prog_type
== BPF_PROG_TYPE_EXT
) {
243 attr
.attach_btf_id
= load_attr
->attach_btf_id
;
244 attr
.attach_prog_fd
= load_attr
->attach_prog_fd
;
246 attr
.prog_ifindex
= load_attr
->prog_ifindex
;
247 attr
.kern_version
= load_attr
->kern_version
;
249 attr
.insn_cnt
= (__u32
)load_attr
->insns_cnt
;
250 attr
.insns
= ptr_to_u64(load_attr
->insns
);
251 attr
.license
= ptr_to_u64(load_attr
->license
);
253 attr
.log_level
= log_level
;
255 attr
.log_buf
= ptr_to_u64(log_buf
);
256 attr
.log_size
= log_buf_sz
;
258 attr
.log_buf
= ptr_to_u64(NULL
);
262 attr
.prog_btf_fd
= load_attr
->prog_btf_fd
;
263 attr
.func_info_rec_size
= load_attr
->func_info_rec_size
;
264 attr
.func_info_cnt
= load_attr
->func_info_cnt
;
265 attr
.func_info
= ptr_to_u64(load_attr
->func_info
);
266 attr
.line_info_rec_size
= load_attr
->line_info_rec_size
;
267 attr
.line_info_cnt
= load_attr
->line_info_cnt
;
268 attr
.line_info
= ptr_to_u64(load_attr
->line_info
);
270 memcpy(attr
.prog_name
, load_attr
->name
,
271 min(strlen(load_attr
->name
), BPF_OBJ_NAME_LEN
- 1));
272 attr
.prog_flags
= load_attr
->prog_flags
;
274 fd
= sys_bpf_prog_load(&attr
, sizeof(attr
));
278 /* After bpf_prog_load, the kernel may modify certain attributes
279 * to give user space a hint how to deal with loading failure.
280 * Check to see whether we can make some changes and load again.
282 while (errno
== E2BIG
&& (!finfo
|| !linfo
)) {
283 if (!finfo
&& attr
.func_info_cnt
&&
284 attr
.func_info_rec_size
< load_attr
->func_info_rec_size
) {
285 /* try with corrected func info records */
286 finfo
= alloc_zero_tailing_info(load_attr
->func_info
,
287 load_attr
->func_info_cnt
,
288 load_attr
->func_info_rec_size
,
289 attr
.func_info_rec_size
);
293 attr
.func_info
= ptr_to_u64(finfo
);
294 attr
.func_info_rec_size
= load_attr
->func_info_rec_size
;
295 } else if (!linfo
&& attr
.line_info_cnt
&&
296 attr
.line_info_rec_size
<
297 load_attr
->line_info_rec_size
) {
298 linfo
= alloc_zero_tailing_info(load_attr
->line_info
,
299 load_attr
->line_info_cnt
,
300 load_attr
->line_info_rec_size
,
301 attr
.line_info_rec_size
);
305 attr
.line_info
= ptr_to_u64(linfo
);
306 attr
.line_info_rec_size
= load_attr
->line_info_rec_size
;
311 fd
= sys_bpf_prog_load(&attr
, sizeof(attr
));
317 if (log_level
|| !log_buf
)
320 /* Try again with log */
321 attr
.log_buf
= ptr_to_u64(log_buf
);
322 attr
.log_size
= log_buf_sz
;
325 fd
= sys_bpf_prog_load(&attr
, sizeof(attr
));
332 int bpf_load_program(enum bpf_prog_type type
, const struct bpf_insn
*insns
,
333 size_t insns_cnt
, const char *license
,
334 __u32 kern_version
, char *log_buf
,
337 struct bpf_load_program_attr load_attr
;
339 memset(&load_attr
, 0, sizeof(struct bpf_load_program_attr
));
340 load_attr
.prog_type
= type
;
341 load_attr
.expected_attach_type
= 0;
342 load_attr
.name
= NULL
;
343 load_attr
.insns
= insns
;
344 load_attr
.insns_cnt
= insns_cnt
;
345 load_attr
.license
= license
;
346 load_attr
.kern_version
= kern_version
;
348 return bpf_load_program_xattr(&load_attr
, log_buf
, log_buf_sz
);
/* Load a program with an explicit verifier log level and prog_flags —
 * used by tests/feature probes that want the raw verifier verdict.
 * NOTE(review): log_buf is dereferenced unconditionally here (upstream
 * behavior); callers must pass a valid buffer.
 */
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, __u32 prog_flags, const char *license,
		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
		       int log_level)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = prog_flags;

	return sys_bpf_prog_load(&attr, sizeof(attr));
}
/* Insert or update one map element (BPF_MAP_UPDATE_ELEM).
 * 'flags' is BPF_ANY/BPF_NOEXIST/BPF_EXIST etc.
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
/* Look up one element by key; the value is copied into 'value'. */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
/* Lookup variant taking lookup flags (e.g. BPF_F_LOCK). */
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
/* Atomically fetch and remove one element (queue/stack style maps). */
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}
/* Delete one element by key. */
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
/* Iterate map keys: writes the key following 'key' into 'next_key'
 * (key == NULL starts iteration; -1/ENOENT marks the end).
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
/* Make a map read-only from user space (BPF_MAP_FREEZE). */
int bpf_map_freeze(int fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
}
457 static int bpf_map_batch_common(int cmd
, int fd
, void *in_batch
,
458 void *out_batch
, void *keys
, void *values
,
460 const struct bpf_map_batch_opts
*opts
)
465 if (!OPTS_VALID(opts
, bpf_map_batch_opts
))
468 memset(&attr
, 0, sizeof(attr
));
469 attr
.batch
.map_fd
= fd
;
470 attr
.batch
.in_batch
= ptr_to_u64(in_batch
);
471 attr
.batch
.out_batch
= ptr_to_u64(out_batch
);
472 attr
.batch
.keys
= ptr_to_u64(keys
);
473 attr
.batch
.values
= ptr_to_u64(values
);
474 attr
.batch
.count
= *count
;
475 attr
.batch
.elem_flags
= OPTS_GET(opts
, elem_flags
, 0);
476 attr
.batch
.flags
= OPTS_GET(opts
, flags
, 0);
478 ret
= sys_bpf(cmd
, &attr
, sizeof(attr
));
479 *count
= attr
.batch
.count
;
484 int bpf_map_delete_batch(int fd
, void *keys
, __u32
*count
,
485 const struct bpf_map_batch_opts
*opts
)
487 return bpf_map_batch_common(BPF_MAP_DELETE_BATCH
, fd
, NULL
,
488 NULL
, keys
, NULL
, count
, opts
);
/* Batch-lookup elements; in_batch/out_batch carry the iteration cursor. */
int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}
/* Batch lookup-and-delete: fetch elements and remove them in one call. */
int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}
508 int bpf_map_update_batch(int fd
, void *keys
, void *values
, __u32
*count
,
509 const struct bpf_map_batch_opts
*opts
)
511 return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH
, fd
, NULL
, NULL
,
512 keys
, values
, count
, opts
);
/* Pin a BPF object fd to a path in bpffs (BPF_OBJ_PIN). */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}
/* Open a pinned BPF object by bpffs path; returns a new fd. */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
536 int bpf_prog_attach(int prog_fd
, int target_fd
, enum bpf_attach_type type
,
539 DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts
, opts
,
543 return bpf_prog_attach_xattr(prog_fd
, target_fd
, type
, &opts
);
546 int bpf_prog_attach_xattr(int prog_fd
, int target_fd
,
547 enum bpf_attach_type type
,
548 const struct bpf_prog_attach_opts
*opts
)
552 if (!OPTS_VALID(opts
, bpf_prog_attach_opts
))
555 memset(&attr
, 0, sizeof(attr
));
556 attr
.target_fd
= target_fd
;
557 attr
.attach_bpf_fd
= prog_fd
;
558 attr
.attach_type
= type
;
559 attr
.attach_flags
= OPTS_GET(opts
, flags
, 0);
560 attr
.replace_bpf_fd
= OPTS_GET(opts
, replace_prog_fd
, 0);
562 return sys_bpf(BPF_PROG_ATTACH
, &attr
, sizeof(attr
));
/* Detach whatever program of 'type' is attached to target_fd. */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
/* Detach a specific program (identified by prog_fd) from target_fd. */
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
588 int bpf_link_create(int prog_fd
, int target_fd
,
589 enum bpf_attach_type attach_type
,
590 const struct bpf_link_create_opts
*opts
)
594 if (!OPTS_VALID(opts
, bpf_link_create_opts
))
597 memset(&attr
, 0, sizeof(attr
));
598 attr
.link_create
.prog_fd
= prog_fd
;
599 attr
.link_create
.target_fd
= target_fd
;
600 attr
.link_create
.attach_type
= attach_type
;
602 return sys_bpf(BPF_LINK_CREATE
, &attr
, sizeof(attr
));
605 int bpf_link_update(int link_fd
, int new_prog_fd
,
606 const struct bpf_link_update_opts
*opts
)
610 if (!OPTS_VALID(opts
, bpf_link_update_opts
))
613 memset(&attr
, 0, sizeof(attr
));
614 attr
.link_update
.link_fd
= link_fd
;
615 attr
.link_update
.new_prog_fd
= new_prog_fd
;
616 attr
.link_update
.flags
= OPTS_GET(opts
, flags
, 0);
617 attr
.link_update
.old_prog_fd
= OPTS_GET(opts
, old_prog_fd
, 0);
619 return sys_bpf(BPF_LINK_UPDATE
, &attr
, sizeof(attr
));
/* Query programs attached to target_fd. '*prog_cnt' is in/out (capacity
 * of prog_ids in, number returned out); attach_flags may be NULL.
 */
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd	= target_fd;
	attr.query.attach_type	= type;
	attr.query.query_flags	= query_flags;
	attr.query.prog_cnt	= *prog_cnt;
	attr.query.prog_ids	= ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}
/* Run a program 'repeat' times over 'data' via BPF_PROG_TEST_RUN and
 * report output size, return value and average duration. The out
 * pointers are only written when the syscall succeeds and are assumed
 * non-NULL by upstream callers.
 */
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}
666 int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr
*test_attr
)
671 if (!test_attr
->data_out
&& test_attr
->data_size_out
> 0)
674 memset(&attr
, 0, sizeof(attr
));
675 attr
.test
.prog_fd
= test_attr
->prog_fd
;
676 attr
.test
.data_in
= ptr_to_u64(test_attr
->data_in
);
677 attr
.test
.data_out
= ptr_to_u64(test_attr
->data_out
);
678 attr
.test
.data_size_in
= test_attr
->data_size_in
;
679 attr
.test
.data_size_out
= test_attr
->data_size_out
;
680 attr
.test
.ctx_in
= ptr_to_u64(test_attr
->ctx_in
);
681 attr
.test
.ctx_out
= ptr_to_u64(test_attr
->ctx_out
);
682 attr
.test
.ctx_size_in
= test_attr
->ctx_size_in
;
683 attr
.test
.ctx_size_out
= test_attr
->ctx_size_out
;
684 attr
.test
.repeat
= test_attr
->repeat
;
686 ret
= sys_bpf(BPF_PROG_TEST_RUN
, &attr
, sizeof(attr
));
687 test_attr
->data_size_out
= attr
.test
.data_size_out
;
688 test_attr
->ctx_size_out
= attr
.test
.ctx_size_out
;
689 test_attr
->retval
= attr
.test
.retval
;
690 test_attr
->duration
= attr
.test
.duration
;
/* Shared iterator for *_GET_NEXT_ID commands; writes the id following
 * start_id into *next_id only on success.
 */
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}
/* Iterate over all loaded program ids. */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}
/* Iterate over all map ids. */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}
/* Iterate over all BTF object ids. */
int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}
/* Open an fd for the program with the given id. */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}
/* Open an fd for the map with the given id. */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}
/* Open an fd for the BTF object with the given id. */
int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}
/* Fetch kernel info for a BPF object fd (prog/map/btf/link).
 * '*info_len' is in/out: buffer capacity in, bytes the kernel filled
 * out; only updated on success.
 */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}
/* Attach prog_fd to the raw tracepoint named 'name'; returns an fd that
 * keeps the attachment alive.
 */
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
/* Load raw BTF data into the kernel. If the first attempt fails and the
 * caller supplied a log buffer without requesting logging, retry once
 * with the verifier log enabled so the failure reason is captured.
 */
int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

retry:
	if (do_log && log_buf && log_buf_size) {
		attr.btf_log_level = 1;
		attr.btf_log_size = log_buf_size;
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
	if (fd == -1 && !do_log && log_buf && log_buf_size) {
		/* failed without a log — turn logging on and try again */
		do_log = true;
		goto retry;
	}

	return fd;
}
807 int bpf_task_fd_query(int pid
, int fd
, __u32 flags
, char *buf
, __u32
*buf_len
,
808 __u32
*prog_id
, __u32
*fd_type
, __u64
*probe_offset
,
811 union bpf_attr attr
= {};
814 attr
.task_fd_query
.pid
= pid
;
815 attr
.task_fd_query
.fd
= fd
;
816 attr
.task_fd_query
.flags
= flags
;
817 attr
.task_fd_query
.buf
= ptr_to_u64(buf
);
818 attr
.task_fd_query
.buf_len
= *buf_len
;
820 err
= sys_bpf(BPF_TASK_FD_QUERY
, &attr
, sizeof(attr
));
821 *buf_len
= attr
.task_fd_query
.buf_len
;
822 *prog_id
= attr
.task_fd_query
.prog_id
;
823 *fd_type
= attr
.task_fd_query
.fd_type
;
824 *probe_offset
= attr
.task_fd_query
.probe_offset
;
825 *probe_addr
= attr
.task_fd_query
.probe_addr
;