// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <bpf/libbpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"
#define DEFINE_PRINT_FN(name, level) \
static int libbpf_##name(const char *fmt, ...)	\
{						\
	va_list args;				\
	int ret;				\
						\
	va_start(args, fmt);			\
	ret = veprintf(level, verbose, pr_fmt(fmt), args);\
	va_end(args);				\
	return ret;				\
}

DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)
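/*
 * Per-program private data attached to each bpf_program: the parsed probe
 * event, whether the program binds to a tracepoint (sys_name/evt_name), and
 * the prologue state (instruction buffer plus tev-to-type mapping) used when
 * one program must be instantiated for several probe points.
 */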
struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;
struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}
void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}
static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}
static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}
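/*
 * Program config terms are "key=value" pairs embedded in a program's ELF
 * section name, separated by ';' and followed by the probe definition or
 * tracepoint name, e.g. (illustrative) "exec=/bin/bash;malloc" or
 * "force=yes;sys_write".
 */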
static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name> ",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no] ",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no] ",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};
static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ = strchr(line, '=');

		*sep = '\0';
		if (!equ || equ >= sep) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}
static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}
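/*
 * config_bpf_program() turns a program's section name into either a
 * tracepoint binding (sys_name/evt_name) or a perf probe event, and attaches
 * the result to the program as private data via clear_prog_priv().
 */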
static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing setting */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	config_str = bpf_program__title(prog, false);
	if (IS_ERR(config_str)) {
		pr_debug("bpf: unable to get title for program\n");
		return PTR_ERR(config_str);
	}

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}
static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first time, bpf__prepare_probe()
	 * fails each time without calling init_probe_symbol_maps multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}
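/*
 * preproc_gen_prologue() is the bpf_program__set_prep() callback: for each
 * prologue type it prepends a generated argument-fetching prologue to the
 * original instructions and hands the combined buffer back to libbpf.
 */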
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev belonging to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}
/*
 * Assign a type number to each tev in a pev.
 * mapping is an array with the same number of slots as tevs in that pev.
 * nr_types will be set to the number of types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}
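/*
 * hook_load_preprocessor() decides whether a program needs a prologue at all
 * (i.e. whether any of its trace events takes arguments); if so it allocates
 * the instruction buffer and type mapping and registers
 * preproc_gen_prologue() with libbpf.
 */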
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hooking preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since no tev has an argument, we don't need to generate
	 * a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}
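/*
 * bpf__probe() resolves and installs the kprobes/uprobes (or tracepoint
 * bindings) described by each program's section name, then hooks the
 * prologue preprocessor so argument fetchers can be generated at load time.
 */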
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, let's consider the prologue, which
		 * adds an argument fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor to the
		 * bpf_program and lets it generate the prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}
#define EVENTS_WRITE_BUFSIZE	4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}
int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		pr_debug("bpf: load objects failed\n");
		return err;
	}
	return 0;
}
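/*
 * bpf__foreach_event() walks every program in the object and invokes the
 * callback once per attachable entity: the tracepoint itself for tracepoint
 * programs, or each resolved probe_trace_event (using the per-type program
 * instance when a prologue was generated) for probe programs.
 */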
int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}
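/*
 * Map configuration is recorded as a list of bpf_map_op entries attached to
 * each bpf_map: an op either stores a literal value or binds a perf evsel,
 * and applies either to all keys or to an explicit list of index ranges.
 */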
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct perf_evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};
static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}
static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}
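/*
 * Handlers for the "map:<mapname>.value=..." and "map:<mapname>.event=..."
 * object config terms, e.g. (illustrative) "map:flags.value=1" or
 * "map:channel.event=<evsel name>" passed on the perf command line.
 */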
static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}
static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}
static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * the kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}
static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}
struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct perf_evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};
static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}
static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	free(map_name);
	if (!err)
		*key_scan_pos += strlen(map_opt);
	return err;
}
int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);
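/*
 * Helpers that apply a map_config_func_t either to every key of an array map
 * (BPF_MAP_KEY_ALL) or only to the keys covered by the user-supplied index
 * ranges (BPF_MAP_KEY_RANGES).
 */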
static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}
static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def, op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}
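/*
 * apply_config_value_for_key() truncates the configured 64-bit value to the
 * map's value size before writing it with bpf_map_update_elem().
 */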
static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct perf_evsel *evsel)
{
	struct xyarray *xy = evsel->fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_map__for_each(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}
int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}

	return 0;
}
#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_map__for_each(pos, obj)

#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
			(strcmp("__bpf_stdout__",	\
				bpf_map__name(pos)) == 0))
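/*
 * bpf__setup_stdout() wires every "__bpf_stdout__" map to a bpf-output
 * event: if any such map still lacks private data, either an existing map's
 * op list is cloned, or a "bpf-output/no-inherit=1,name=__bpf_stdout__/"
 * event is created and recorded as a SET_EVSEL op.
 */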
int bpf__setup_stdout(struct perf_evlist *evlist)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct perf_evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return 0;

	if (!tmpl_priv) {
		err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
				   NULL);
		if (err) {
			pr_debug("ERROR: failed to create bpf-output event\n");
			return -err;
		}

		evsel = perf_evlist__last(evlist);
	}

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return -ENOMEM;

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return err;
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return PTR_ERR(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return 0;
}
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}
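/*
 * The bpf__strerror_*() helpers below share a common pattern: the
 * head/entry/end macros build a switch on the error code, print a specific
 * hint for the cases they know about, and fall back to bpf_loader_strerror()
 * otherwise.
 */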
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}
int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}
int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}