// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <errno.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"
static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			     const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}

struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;
struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");

		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}
void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}
static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name>",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no]",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no]",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};
static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}
static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}
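
/*
 * Illustrative examples of config strings accepted above (the probe points
 * and paths are hypothetical, only the syntax matters):
 *
 *   "func=do_sys_open filename"          - kprobe-style perf_probe_event
 *   "exec=/usr/lib/libc.so.6;malloc"     - uprobe on a user binary ('exec' term)
 *   "module=i915;force=yes;some_func"    - kprobe in a module, forcibly added
 *   "raw_syscalls:sys_enter"             - tracepoint (no '=', contains ':')
 *
 * Everything before the last ';' is parsed as key=value terms by
 * do_prog_config(); the remainder is either a tracepoint name or a string
 * handed to parse_perf_probe_command().
 */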
static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing setting */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	config_str = bpf_program__title(prog, false);
	if (IS_ERR(config_str)) {
		pr_debug("bpf: unable to get title for program\n");
		return PTR_ERR(config_str);
	}

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}
static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so that if init failed the first time,
	 * bpf__prepare_probe() fails each subsequent time without calling
	 * init_probe_symbol_maps() multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev belonging to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}
/*
 * Assign a type number to each tev in a pev.
 * 'mapping' is an array with one slot per tev in that pev.
 * nr_types is set to the number of distinct types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hooking preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since no tev fetches an argument, there is no need to
	 * generate a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}
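
/*
 * Worked example for the type mapping computed by map_prologue() above
 * (illustrative): with three tevs where tevs[0] and tevs[2] fetch identical
 * argument lists and tevs[1] fetches none, the sorted view groups tevs[0]
 * and tevs[2] together, so the result is mapping = {0, 1, 0} and
 * *nr_types = 2: only two distinct prologues are generated even though
 * three probe points were expanded.
 */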
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, consider the prologue, which adds an
		 * argument fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor to the
		 * bpf_program, letting it generate the prologue dynamically
		 * during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}
#define EVENTS_WRITE_BUFSIZE	4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		int i;
		struct bpf_prog_priv *priv = bpf_program__priv(prog);

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		char bf[128];

		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return 0;
}
int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	int err, fd;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct perf_evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}
static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}
static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}
static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}
static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}
static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}
struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct perf_evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}
static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	free(map_name);
	if (!err)
		*key_scan_pos += strlen(map_opt);
	return err;
}
int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}
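
/*
 * Illustrative object config terms handled above (map and event names are
 * hypothetical):
 *
 *   "map:my_vals.value=42"       - program a BPF_MAP_TYPE_ARRAY slot value
 *   "map:channel.event=cycles"   - wire an evsel into a PERF_EVENT_ARRAY map
 *
 * An index range attached to the option (term->array) restricts which keys
 * are touched; otherwise every key of the map is updated.
 */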
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}
static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def, op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}
static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}
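
/*
 * Example (illustrative): for a map configured with "value=42" whose
 * value_size is 4, the u32 branch above runs and bpf_map_update_elem()
 * writes (u32)42 with BPF_ANY for each selected key.
 */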
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct perf_evsel *evsel)
{
	struct xyarray *xy = evsel->fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}
static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_object__for_each_map(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_object__for_each_map(pos, obj)

#define bpf__for_each_map_named(pos, obj, objtmp, name)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
			(strcmp(name,			\
				bpf_map__name(pos)) == 0))
struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct perf_evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = perf_evlist__last(evlist);
	}

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}
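
/*
 * A scriptlet that wants bpf-output typically declares a map like the
 * following (illustrative sketch, not part of this file):
 *
 *   struct bpf_map_def SEC("maps") __bpf_stdout__ = {
 *       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *       .key_size = sizeof(int),
 *       .value_size = sizeof(u32),
 *       .max_entries = __NR_CPUS__,
 *   };
 *
 * bpf__setup_output_event() then attaches the bpf-output evsel (or a user
 * supplied event) to every map of that name.
 */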
int bpf__setup_stdout(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");

	return PTR_ERR_OR_ZERO(evsel);
}
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_output_event(struct perf_evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}