 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/list.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"

#define __printf(a, b)	__attribute__((format(printf, a, b)))
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
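/*
 * Usage sketch (illustrative only, not part of the library): a caller can
 * redirect libbpf output into its own logger.  This assumes the printf-like
 * libbpf_print_fn_t prototype declared in libbpf.h; my_vlog() below is a
 * hypothetical helper standing in for the caller's logging backend.
 *
 *	static int my_warn(const char *fmt, ...)
 *	{
 *		va_list args;
 *		int ret;
 *
 *		va_start(args, fmt);
 *		ret = my_vlog(fmt, args);
 *		va_end(args);
 *		return ret;
 *	}
 *
 *	libbpf_set_print(my_warn, NULL, NULL);	// warnings only, no info/debug
 */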
#define STRERR_BUFSIZE  128

#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
};
int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		ret = strerror_r(err, buf, size);
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}
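/*
 * Usage sketch (illustrative only): decode a negative return value from a
 * libbpf call into a human readable message.  The buffer is always
 * NUL-terminated by libbpf_strerror().
 *
 *	char msg[STRERR_BUFSIZE];
 *
 *	libbpf_strerror(err, msg, sizeof(msg));
 *	fprintf(stderr, "libbpf failed: %s\n", msg);
 */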
#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)

/* Copied from tools/perf/util/util.h */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })

# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	struct bpf_insn *insns;
	enum bpf_prog_type type;

	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	bpf_program_clear_priv_t clear_priv;
};

struct bpf_map {
	struct bpf_map_def def;
	bpf_map_clear_priv_t clear_priv;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	struct bpf_program *programs;
	struct bpf_map *maps;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */

	/*
	 * All loaded bpf_object instances are linked in a list, which is
	 * hidden from the caller. The bpf_objects__<func> handlers deal
	 * with all existing objects.
	 */
	struct list_head list;

	bpf_object_clear_priv_t clear_priv;
};
#define obj_elf_valid(o)	((o)->efile.elf)
static void bpf_program__unload(struct bpf_program *prog)
{
	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}
static void bpf_program__exit(struct bpf_program *prog)
{
	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->section_name);
	zfree(&prog->reloc_desc);
}
static int
bpf_program__init(void *data, size_t size, char *name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog %s\n",
			   name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for %s\n", name);
		goto errout;
	}
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_KPROBE;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so it doesn't need special treatment
		 * in bpf_close_object().
		 */
		pr_warning("failed to alloc a new program '%s'\n",
			   name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	progs[nr_progs] = prog;
	return 0;
}
static struct bpf_object *bpf_object__new(const char *path,
					   void *obj_buf,
					   size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);

	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would have to duplicate the
	 * buffer to avoid the user freeing it before the ELF work is done.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				   strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static unsigned int const endian = 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		/* We are big endian, BPF obj is little endian. */
		if (*(unsigned char const *)&endian != 1)
			goto mismatch;
		break;

	case ELFDATA2MSB:
		/* We are little endian, BPF obj is big endian. */
		if (*(unsigned char const *)&endian != 0)
			goto mismatch;
		break;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

	return 0;

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}
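/*
 * The check above probes host byte order by reading the first byte of a
 * known integer: a little endian host stores the 0x01 byte of the constant
 * 1 first, a big endian host stores 0x00 first.  A minimal standalone
 * sketch of the same idea (illustrative, not used by the library):
 *
 *	static int host_is_little_endian(void)
 *	{
 *		static unsigned int const probe = 1;
 *
 *		return *(unsigned char const *)&probe == 1;
 *	}
 */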
static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}
static int
bpf_object__validate_maps(struct bpf_object *obj)
{
	int i;

	/*
	 * If there's only 1 map, the only error case should have been
	 * caught in bpf_object__init_maps().
	 */
	if (!obj->maps || !obj->nr_maps || (obj->nr_maps == 1))
		return 0;

	for (i = 1; i < obj->nr_maps; i++) {
		const struct bpf_map *a = &obj->maps[i - 1];
		const struct bpf_map *b = &obj->maps[i];

		if (b->offset - a->offset < sizeof(struct bpf_map_def)) {
			pr_warning("corrupted map section in %s: map \"%s\" too small\n",
				   obj->path, a->name);
			return -EINVAL;
		}
	}
	return 0;
}
static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	return a->offset - b->offset;
}
static int
bpf_object__init_maps(struct bpf_object *obj)
{
	int i, map_idx, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data = NULL;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element
	 * is considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	/*
	 * Fill all fds with -1 so we won't close an incorrect fd
	 * (fd 0 is stdin) on failure (zclose won't close a negative fd).
	 */
	for (i = 0; i < nr_maps; i++)
		obj->maps[i].fd = -1;

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		if (sym.st_value + sizeof(struct bpf_map_def) > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		obj->maps[map_idx].def = *def;
		map_idx++;
	}

	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return bpf_object__validate_maps(obj);
}
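/*
 * For reference, the "maps" section parsed above is produced by definitions
 * like the one below in the BPF C code (illustrative; it assumes the
 * struct bpf_map_def layout from libbpf.h and a SEC() macro that places the
 * variable into the "maps" ELF section):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type        = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(u32),
 *		.value_size  = sizeof(u64),
 *		.max_entries = 1024,
 *	};
 *
 * Each such symbol contributes one bpf_map entry; its st_value is the offset
 * of the definition inside the section, which is what the sort key above uses.
 */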
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section header from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section name from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section data from %s(%s)\n",
				   name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
			 name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		else if (strcmp(name, "maps") == 0)
			obj->efile.maps_shndx = idx;
		else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			err = bpf_object__add_program(obj, data->d_buf,
						      data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s",
					   name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;

			reloc = realloc(reloc,
					sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;

				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		}
		if (err)
			goto out;
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	if (obj->efile.maps_shndx >= 0)
		err = bpf_object__init_maps(obj);
out:
	return err;
}
static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}
static int
bpf_program__collect_reloc(struct bpf_program *prog,
			   size_t nr_maps, GElf_Shdr *shdr,
			   Elf_Data *data, Elf_Data *symbols,
			   int maps_shndx)
{
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sym.st_shndx != maps_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d larger than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	unsigned int i;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;
		int *pfd = &obj->maps[i].fd;

		*pfd = bpf_create_map(def->type,
				      def->key_size,
				      def->value_size,
				      def->max_entries);
		if (*pfd < 0) {
			size_t j;

			pr_warning("failed to create map: %s\n",
				   strerror(errno));
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return -errno;
		}
		pr_debug("create map %s: fd=%d\n", obj->maps[i].name, *pfd);
	}

	return 0;
}
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		int insn_idx, map_idx;
		struct bpf_insn *insns = prog->insns;

		insn_idx = prog->reloc_desc[i].insn_idx;
		map_idx = prog->reloc_desc[i].map_idx;

		if (insn_idx >= (int)prog->insns_cnt) {
			pr_warning("relocation out of range: '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
		insns[insn_idx].imm = obj->maps[map_idx].fd;
	}

	zfree(&prog->reloc_desc);
	return 0;
}
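/*
 * The rewrite above patches the two-slot ld_imm64 instruction that the
 * compiler emitted for a map reference.  Sketched with the BPF_LD_MAP_FD()
 * convenience macro from the kernel's filter.h (illustrative; the macro
 * expands to a BPF_LD | BPF_IMM | BPF_DW instruction pair with
 * src_reg = BPF_PSEUDO_MAP_FD and the map fd in imm):
 *
 *	struct bpf_insn insn[] = {
 *		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *		...
 *	};
 *
 * The kernel verifier recognizes BPF_PSEUDO_MAP_FD and converts the fd into
 * an in-kernel map pointer at program load time.
 */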
static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;
		size_t nr_maps = obj->nr_maps;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no %d section\n",
				   idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog, nr_maps,
						 shdr, data,
						 obj->efile.symbols,
						 obj->efile.maps_shndx);
		if (err)
			return err;
	}
	return 0;
}
static int
load_program(enum bpf_prog_type type, struct bpf_insn *insns,
	     int insns_cnt, char *license, u32 kern_version, int *pfd)
{
	int ret;
	char *log_buf;

	if (!insns || !insns_cnt)
		return -EINVAL;

	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program(type, insns, insns_cnt, license,
			       kern_version, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%d insns), at most %d insns\n",
			   insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
					      insns_cnt, license, kern_version,
					      NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			continue;
		}

		err = load_program(prog->type, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);
		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}
static int bpf_object__validate(struct bpf_object *obj)
{
	if (obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
struct bpf_object *bpf_object__open(const char *path)
{
	/* param validation */
	if (!path)
		return NULL;

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0);
}
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz);
}
int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
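/*
 * Typical caller flow (illustrative sketch, assuming the public API declared
 * in libbpf.h, the IS_ERR() helper from linux/err.h, and an object file
 * "prog.o" built from BPF C code):
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	int fd;
 *
 *	obj = bpf_object__open("prog.o");
 *	if (!obj || IS_ERR(obj))
 *		return -1;
 *	if (bpf_object__load(obj))	// creates maps, relocates, loads progs
 *		goto err;
 *	bpf_object__for_each_program(prog, obj)
 *		fd = bpf_program__fd(prog);	// attach fd to a kprobe etc.
 *	...
 * err:
 *	bpf_object__close(obj);
 */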
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so we don't need to check on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
const char *bpf_object__name(struct bpf_object *obj)
{
	return obj ? obj->path : ERR_PTR(-EINVAL);
}
unsigned int bpf_object__kversion(struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}
void *bpf_object__priv(struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	size_t idx;

	if (!obj->programs)
		return NULL;
	/* First handler */
	if (prev == NULL)
		return &obj->programs[0];

	if (prev->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (prev - obj->programs) + 1;
	if (idx >= obj->nr_programs)
		return NULL;
	return &obj->programs[idx];
}
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}
void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fds with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}
int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}
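/*
 * Usage sketch for the instance/preprocessor API (illustrative only; it
 * assumes the bpf_program_prep_t and struct bpf_prog_prep_result definitions
 * from libbpf.h).  A preprocessor can hand back a modified instruction
 * buffer per instance, or mark an instance as skipped by leaving the result
 * empty:
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;	// reuse original instructions
 *		res->new_insn_cnt = insns_cnt;
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 2, my_prep);   // before bpf_object__load()
 *	...
 *	fd = bpf_program__nth_fd(prog, 1);         // fd of the 2nd instance
 */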
static void bpf_program__set_type(struct bpf_program *prog,
				  enum bpf_prog_type type)
{
	prog->type = type;
}

int bpf_program__set_tracepoint(struct bpf_program *prog)
{
	if (!prog)
		return -EINVAL;
	bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
	return 0;
}

int bpf_program__set_kprobe(struct bpf_program *prog)
{
	if (!prog)
		return -EINVAL;
	bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
	return 0;
}

static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

bool bpf_program__is_tracepoint(struct bpf_program *prog)
{
	return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
}

bool bpf_program__is_kprobe(struct bpf_program *prog)
{
	return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
}
int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}
const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}
const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}
int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->clear_priv)
		map->clear_priv(map, map->priv);

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}
void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}
struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	size_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if (prev == NULL)
		return s;

	if ((prev < s) || (prev >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (prev - obj->maps) + 1;
	if (idx >= obj->nr_maps)
		return NULL;
	return &obj->maps[idx];
}
struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_map__for_each(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	int i;

	for (i = 0; i < obj->nr_maps; i++) {
		if (obj->maps[i].offset == offset)
			return &obj->maps[i];
	}
	return ERR_PTR(-ENOENT);
}
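/*
 * Usage sketch (illustrative only): look up a map created by
 * bpf_object__load() and use its fd with the bpf(2) map helpers declared in
 * bpf.h.  "my_map" is whatever name the BPF object gave the map in its
 * "maps" section.
 *
 *	struct bpf_map *map;
 *	int map_fd, key = 0;
 *	long long value;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (!map)
 *		return -1;
 *	map_fd = bpf_map__fd(map);
 *	bpf_map_lookup_elem(map_fd, &key, &value);
 */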