// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <ctype.h>
#include <assert.h>
#include <sys/types.h>
#include <libelf.h>
#include <gelf.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <bpf/bpf.h>
#include "bpf_load.h"
#include "perf-sys.h"
#define DEBUGFS "/sys/kernel/debug/tracing/"

static char license[128];
static int kern_version;
static bool processed_sec[128];
char bpf_log_buf[BPF_LOG_BUF_SIZE];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
int prog_array_fd = -1;

struct bpf_map_data map_data[MAX_MAPS];
int map_data_count = 0;
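
/* Store a program fd in the global BPF_MAP_TYPE_PROG_ARRAY so it can be
 * reached via bpf_tail_call(). The array index is parsed from the digits
 * that terminate the ELF section name, e.g. "socket/2" -> slot 2.
 */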
static int populate_prog_array(const char *event, int prog_fd)
{
	int ind = atoi(event), err;

	err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
	if (err < 0) {
		printf("failed to store prog_fd in prog_array\n");
		return -1;
	}
	return 0;
}
static int write_kprobe_events(const char *val)
{
	int fd, ret, flags;

	if (val == NULL)
		return -1;
	else if (val[0] == '\0')
		flags = O_WRONLY | O_TRUNC;
	else
		flags = O_WRONLY | O_APPEND;

	fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);

	ret = write(fd, val, strlen(val));
	close(fd);

	return ret;
}
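
/* Map an ELF section name prefix (socket, kprobe/, xdp, ...) to a BPF
 * program type, load the instructions with bpf_load_program(), and for
 * probe/tracepoint sections attach the program through the perf API.
 */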
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
	bool is_socket = strncmp(event, "socket", 6) == 0;
	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
	bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
	bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
	bool is_xdp = strncmp(event, "xdp", 3) == 0;
	bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
	bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
	bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
	bool is_sockops = strncmp(event, "sockops", 7) == 0;
	bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0;
	bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0;
	size_t insns_cnt = size / sizeof(struct bpf_insn);
	enum bpf_prog_type prog_type;
	char buf[256];
	int fd, efd, err, id;
	struct perf_event_attr attr = {};

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	if (is_socket) {
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	} else if (is_kprobe || is_kretprobe) {
		prog_type = BPF_PROG_TYPE_KPROBE;
	} else if (is_tracepoint) {
		prog_type = BPF_PROG_TYPE_TRACEPOINT;
	} else if (is_raw_tracepoint) {
		prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT;
	} else if (is_xdp) {
		prog_type = BPF_PROG_TYPE_XDP;
	} else if (is_perf_event) {
		prog_type = BPF_PROG_TYPE_PERF_EVENT;
	} else if (is_cgroup_skb) {
		prog_type = BPF_PROG_TYPE_CGROUP_SKB;
	} else if (is_cgroup_sk) {
		prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	} else if (is_sockops) {
		prog_type = BPF_PROG_TYPE_SOCK_OPS;
	} else if (is_sk_skb) {
		prog_type = BPF_PROG_TYPE_SK_SKB;
	} else if (is_sk_msg) {
		prog_type = BPF_PROG_TYPE_SK_MSG;
	} else {
		printf("Unknown event '%s'\n", event);
		return -1;
	}

	if (prog_cnt == MAX_PROGS)
		return -1;

	fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
			      bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (fd < 0) {
		printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
		return -1;
	}

	prog_fd[prog_cnt++] = fd;

	if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk)
		return 0;

	if (is_socket || is_sockops || is_sk_skb || is_sk_msg) {
		if (is_socket)
			event += 6;
		else
			event += 7;
		if (*event != '/')
			return 0;
		event++;
		if (!isdigit(*event)) {
			printf("invalid prog number\n");
			return -1;
		}
		return populate_prog_array(event, fd);
	}

	if (is_raw_tracepoint) {
		efd = bpf_raw_tracepoint_open(event + 15, fd);
		if (efd < 0) {
			printf("tracepoint %s %s\n", event + 15, strerror(errno));
			return -1;
		}
		event_fd[prog_cnt - 1] = efd;
		return 0;
	}

	if (is_kprobe || is_kretprobe) {
		bool need_normal_check = true;
		const char *event_prefix = "";

		if (is_kprobe)
			event += 7;
		else
			event += 10;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}

		if (isdigit(*event))
			return populate_prog_array(event, fd);

#ifdef __x86_64__
		if (strncmp(event, "sys_", 4) == 0) {
			snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
				is_kprobe ? 'p' : 'r', event, event);
			err = write_kprobe_events(buf);
			if (err >= 0) {
				need_normal_check = false;
				event_prefix = "__x64_";
			}
		}
#endif
		if (need_normal_check) {
			snprintf(buf, sizeof(buf), "%c:%s %s",
				is_kprobe ? 'p' : 'r', event, event);
			err = write_kprobe_events(buf);
			if (err < 0) {
				printf("failed to create kprobe '%s' error '%s'\n",
				       event, strerror(errno));
				return -1;
			}
		}

		strcpy(buf, DEBUGFS);
		strcat(buf, "events/kprobes/");
		strcat(buf, event_prefix);
		strcat(buf, event);
		strcat(buf, "/id");
	} else if (is_tracepoint) {
		event += 11;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}
		strcpy(buf, DEBUGFS);
		strcat(buf, "events/");
		strcat(buf, event);
		strcat(buf, "/id");
	}

	efd = open(buf, O_RDONLY, 0);
	if (efd < 0) {
		printf("failed to open event %s\n", event);
		return -1;
	}

	err = read(efd, buf, sizeof(buf));
	if (err < 0 || err >= sizeof(buf)) {
		printf("read from '%s' failed '%s'\n", event, strerror(errno));
		return -1;
	}

	close(efd);

	buf[err] = 0;
	id = atoi(buf);
	attr.config = id;

	efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
	if (efd < 0) {
		printf("event %d fd %d err %s\n", id, efd, strerror(errno));
		return -1;
	}
	event_fd[prog_cnt - 1] = efd;
	err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
		       strerror(errno));
		return -1;
	}
	err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
		       strerror(errno));
		return -1;
	}

	return 0;
}
static int load_maps(struct bpf_map_data *maps, int nr_maps,
		     fixup_map_cb fixup_map)
{
	int i, numa_node;

	for (i = 0; i < nr_maps; i++) {
		if (fixup_map) {
			fixup_map(&maps[i], i);
			/* Allow userspace to assign map FD prior to creation */
			if (maps[i].fd != -1) {
				map_fd[i] = maps[i].fd;
				continue;
			}
		}

		numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ?
			maps[i].def.numa_node : -1;

		if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		    maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
			int inner_map_fd = map_fd[maps[i].def.inner_map_idx];

			map_fd[i] = bpf_create_map_in_map_node(maps[i].def.type,
							maps[i].name,
							maps[i].def.key_size,
							inner_map_fd,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		} else {
			map_fd[i] = bpf_create_map_node(maps[i].def.type,
							maps[i].name,
							maps[i].def.key_size,
							maps[i].def.value_size,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		}
		if (map_fd[i] < 0) {
			printf("failed to create map %d (%s): %d %s\n",
			       i, maps[i].name, errno, strerror(errno));
			return 1;
		}
		maps[i].fd = map_fd[i];

		if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY)
			prog_array_fd = map_fd[i];
	}
	return 0;
}
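
/* Fetch section name, header and data for ELF section index i;
 * returns non-zero on any libelf failure.
 */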
static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
		   GElf_Shdr *shdr, Elf_Data **data)
{
	Elf_Scn *scn;

	scn = elf_getscn(elf, i);
	if (!scn)
		return 1;

	if (gelf_getshdr(scn, shdr) != shdr)
		return 2;

	*shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
	if (!*shname || !shdr->sh_size)
		return 3;

	*data = elf_getdata(scn, 0);
	if (!*data || elf_getdata(scn, *data) != NULL)
		return 4;

	return 0;
}
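
/* Rewrite BPF_LD_IMM64 instructions flagged by relocation entries:
 * mark them as BPF_PSEUDO_MAP_FD loads and patch in the fd of the map
 * whose ELF offset matches the relocation symbol's st_value.
 */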
static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
				GElf_Shdr *shdr, struct bpf_insn *insn,
				struct bpf_map_data *maps, int nr_maps)
{
	int i, nrels;

	nrels = shdr->sh_size / shdr->sh_entsize;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		bool match = false;
		int map_idx;

		gelf_getrel(data, i, &rel);

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);

		gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);

		if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			printf("invalid relo for insn[%d].code 0x%x\n",
			       insn_idx, insn[insn_idx].code);
			return 1;
		}
		insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;

		/* Match FD relocation against recorded map_data[] offset */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].elf_offset == sym.st_value) {
				match = true;
				break;
			}
		}
		if (match) {
			insn[insn_idx].imm = maps[map_idx].fd;
		} else {
			printf("invalid relo for insn[%d] no map_data match\n",
			       insn_idx);
			return 1;
		}
	}

	return 0;
}
static int cmp_symbols(const void *l, const void *r)
{
	const GElf_Sym *lsym = (const GElf_Sym *)l;
	const GElf_Sym *rsym = (const GElf_Sym *)r;

	if (lsym->st_value < rsym->st_value)
		return -1;
	else if (lsym->st_value > rsym->st_value)
		return 1;
	else
		return 0;
}
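
/* Parse the ELF "maps" section into the caller's maps[] array,
 * tolerating older or newer struct bpf_load_map_def layouts as
 * explained in the compatibility comment below. Returns the number
 * of maps found or a negative errno-style value.
 */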
static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,
				 Elf *elf, Elf_Data *symbols, int strtabidx)
{
	int map_sz_elf, map_sz_copy;
	bool validate_zero = false;
	Elf_Data *data_maps;
	int i, nr_maps;
	GElf_Sym *sym;
	Elf_Scn *scn;

	if (maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	/* Get data for maps section via elf index */
	scn = elf_getscn(elf, maps_shndx);
	if (scn)
		data_maps = elf_getdata(scn, NULL);
	if (!scn || !data_maps) {
		printf("Failed to get Elf_Data from maps section %d\n",
		       maps_shndx);
		return -EINVAL;
	}

	/* For each map get corresponding symbol table entry */
	sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym));
	for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		assert(nr_maps < MAX_MAPS+1);
		if (!gelf_getsym(symbols, i, &sym[nr_maps]))
			continue;
		if (sym[nr_maps].st_shndx != maps_shndx)
			continue;
		/* Only increment for symbols in the maps section */
		nr_maps++;
	}

	/* Align to map_fd[] order, via sort on offset in sym.st_value */
	qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols);

	/* Keeping compatible with ELF maps section changes
	 * ------------------------------------------------
	 * The loader code knows the size of struct bpf_load_map_def,
	 * but the struct stored in the ELF file can be different.
	 *
	 * Unfortunately sym[i].st_size is zero.  To calculate the
	 * struct size stored in the ELF file, assume all structs have
	 * the same size, and simply divide by the number of map
	 * symbols.
	 */
	map_sz_elf = data_maps->d_size / nr_maps;
	map_sz_copy = sizeof(struct bpf_load_map_def);
	if (map_sz_elf < map_sz_copy) {
		/*
		 * Backward compat, loading older ELF file with
		 * smaller struct, keeping remaining bytes zero.
		 */
		map_sz_copy = map_sz_elf;
	} else if (map_sz_elf > map_sz_copy) {
		/*
		 * Forward compat, loading newer ELF file with larger
		 * struct with unknown features. Assume zero means
		 * feature not used.  Thus, validate rest of struct
		 * data is zero.
		 */
		validate_zero = true;
	}

	/* Memcpy relevant part of ELF maps data to loader maps */
	for (i = 0; i < nr_maps; i++) {
		struct bpf_load_map_def *def;
		unsigned char *addr, *end;
		const char *map_name;
		size_t offset;

		map_name = elf_strptr(elf, strtabidx, sym[i].st_name);
		maps[i].name = strdup(map_name);
		if (!maps[i].name) {
			printf("strdup(%s): %s(%d)\n", map_name,
			       strerror(errno), errno);
			free(sym);
			return -errno;
		}

		/* Symbol value is offset into ELF maps section data area */
		offset = sym[i].st_value;
		def = (struct bpf_load_map_def *)(data_maps->d_buf + offset);
		maps[i].elf_offset = offset;
		memset(&maps[i].def, 0, sizeof(struct bpf_load_map_def));
		memcpy(&maps[i].def, def, map_sz_copy);

		/* Verify no newer features were requested */
		if (validate_zero) {
			addr = (unsigned char *) def + map_sz_copy;
			end  = (unsigned char *) def + map_sz_elf;
			for (; addr < end; addr++) {
				if (*addr != 0) {
					free(sym);
					return -EFBIG;
				}
			}
		}
	}

	free(sym);
	return nr_maps;
}
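
/* Core loader: walk the ELF object once to collect license, version
 * and maps, apply map relocations to program sections, then load and
 * attach every program section with a recognized name prefix.
 */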
static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
{
	int fd, i, ret, maps_shndx = -1, strtabidx = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr, shdr_prog;
	Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
	char *shname, *shname_prog;
	int nr_maps = 0;

	/* reset global variables */
	kern_version = 0;
	memset(license, 0, sizeof(license));
	memset(processed_sec, 0, sizeof(processed_sec));

	if (elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(path, O_RDONLY, 0);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		return 1;

	if (gelf_getehdr(elf, &ehdr) != &ehdr)
		return 1;

	/* clear all kprobes */
	i = write_kprobe_events("");

	/* scan over all elf sections to get license and map info */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (0) /* helpful for llvm debugging */
			printf("section %d:%s data %p size %zd link %d flags %d\n",
			       i, shname, data->d_buf, data->d_size,
			       shdr.sh_link, (int) shdr.sh_flags);

		if (strcmp(shname, "license") == 0) {
			processed_sec[i] = true;
			memcpy(license, data->d_buf, data->d_size);
		} else if (strcmp(shname, "version") == 0) {
			processed_sec[i] = true;
			if (data->d_size != sizeof(int)) {
				printf("invalid size of version section %zd\n",
				       data->d_size);
				return 1;
			}
			memcpy(&kern_version, data->d_buf, sizeof(int));
		} else if (strcmp(shname, "maps") == 0) {
			int j;

			maps_shndx = i;
			data_maps = data;
			for (j = 0; j < MAX_MAPS; j++)
				map_data[j].fd = -1;
		} else if (shdr.sh_type == SHT_SYMTAB) {
			strtabidx = shdr.sh_link;
			symbols = data;
		}
	}

	ret = 1;

	if (!symbols) {
		printf("missing SHT_SYMTAB section\n");
		goto done;
	}

	if (data_maps) {
		nr_maps = load_elf_maps_section(map_data, maps_shndx,
						elf, symbols, strtabidx);
		if (nr_maps < 0) {
			printf("Error: Failed loading ELF maps (errno:%d):%s\n",
			       nr_maps, strerror(-nr_maps));
			goto done;
		}
		if (load_maps(map_data, nr_maps, fixup_map))
			goto done;
		map_data_count = nr_maps;

		processed_sec[maps_shndx] = true;
	}

	/* process all relo sections, and rewrite bpf insns for maps */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (shdr.sh_type == SHT_REL) {
			struct bpf_insn *insns;

			/* locate prog sec that need map fixup (relocations) */
			if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
				    &shdr_prog, &data_prog))
				continue;

			if (shdr_prog.sh_type != SHT_PROGBITS ||
			    !(shdr_prog.sh_flags & SHF_EXECINSTR))
				continue;

			insns = (struct bpf_insn *) data_prog->d_buf;
			processed_sec[i] = true; /* relo section */

			if (parse_relo_and_apply(data, symbols, &shdr, insns,
						 map_data, nr_maps))
				continue;
		}
	}

	/* load programs */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (memcmp(shname, "kprobe/", 7) == 0 ||
		    memcmp(shname, "kretprobe/", 10) == 0 ||
		    memcmp(shname, "tracepoint/", 11) == 0 ||
		    memcmp(shname, "raw_tracepoint/", 15) == 0 ||
		    memcmp(shname, "xdp", 3) == 0 ||
		    memcmp(shname, "perf_event", 10) == 0 ||
		    memcmp(shname, "socket", 6) == 0 ||
		    memcmp(shname, "cgroup/", 7) == 0 ||
		    memcmp(shname, "sockops", 7) == 0 ||
		    memcmp(shname, "sk_skb", 6) == 0 ||
		    memcmp(shname, "sk_msg", 6) == 0) {
			ret = load_and_attach(shname, data->d_buf,
					      data->d_size);
			if (ret != 0)
				goto done;
		}
	}

done:
	close(fd);
	return ret;
}
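
/* Public entry points. A typical caller does something like the
 * following (illustrative sketch only; "prog.o" is a placeholder for
 * an object built from the corresponding *_kern.c file):
 *
 *	if (load_bpf_file("prog.o"))
 *		return 1;
 *	read_trace_pipe();
 */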
int load_bpf_file(char *path)
{
	return do_load_bpf_file(path, NULL);
}

int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
{
	return do_load_bpf_file(path, fixup_map);
}
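
/* Stream the tracefs trace_pipe to stdout forever; handy for samples
 * whose BPF programs log via bpf_trace_printk().
 */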
void read_trace_pipe(void)
{
	int trace_fd;

	trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
	if (trace_fd < 0)
		return;

	while (1) {
		static char buf[4096];
		ssize_t sz;

		sz = read(trace_fd, buf, sizeof(buf));
		if (sz > 0) {
			buf[sz] = 0;
			puts(buf);
		}
	}
}