// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <libelf.h>
#include <gelf.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <ctype.h>
#include <assert.h>
#include <bpf/bpf.h>

#include "bpf_load.h"
#include "perf-sys.h"
#define DEBUGFS "/sys/kernel/debug/tracing/"

static char license[128];
static int kern_version;
static bool processed_sec[128];
char bpf_log_buf[BPF_LOG_BUF_SIZE];
/* fds of created maps/programs, exposed to callers via bpf_load.h */
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
int prog_array_fd = -1;

struct bpf_map_data map_data[MAX_MAPS];
int map_data_count = 0;
static int populate_prog_array(const char *event, int prog_fd)
{
	int ind = atoi(event), err;

	err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
	if (err < 0) {
		printf("failed to store prog_fd in prog_array\n");
		return -1;
	}

	return 0;
}

static int write_kprobe_events(const char *val)
{
	int fd, ret, flags;

	if (val == NULL)
		return -1;
	else if (val[0] == '\0')
		flags = O_WRONLY | O_TRUNC;	/* empty string clears all events */
	else
		flags = O_WRONLY | O_APPEND;

	fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);

	ret = write(fd, val, strlen(val));
	close(fd);

	return ret;
}

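/*
 * Format written to kprobe_events (see Documentation/trace/kprobetrace.rst):
 * "p:EVENT SYMBOL" registers a kprobe, "r:EVENT SYMBOL" a kretprobe, and
 * an empty write with O_TRUNC clears all dynamically added events.  The
 * loader below emits lines such as:
 *
 *	p:sys_write sys_write		// kprobe named sys_write on sys_write()
 *	r:sys_read sys_read		// kretprobe on sys_read()
 */
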
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
	bool is_socket = strncmp(event, "socket", 6) == 0;
	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
	bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
	bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
	bool is_xdp = strncmp(event, "xdp", 3) == 0;
	bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
	bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
	bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
	bool is_sockops = strncmp(event, "sockops", 7) == 0;
	bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0;
	bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0;
	size_t insns_cnt = size / sizeof(struct bpf_insn);
	enum bpf_prog_type prog_type;
	char buf[256];
	int fd, efd, err, id;
	struct perf_event_attr attr = {};

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	if (is_socket) {
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	} else if (is_kprobe || is_kretprobe) {
		prog_type = BPF_PROG_TYPE_KPROBE;
	} else if (is_tracepoint) {
		prog_type = BPF_PROG_TYPE_TRACEPOINT;
	} else if (is_raw_tracepoint) {
		prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT;
	} else if (is_xdp) {
		prog_type = BPF_PROG_TYPE_XDP;
	} else if (is_perf_event) {
		prog_type = BPF_PROG_TYPE_PERF_EVENT;
	} else if (is_cgroup_skb) {
		prog_type = BPF_PROG_TYPE_CGROUP_SKB;
	} else if (is_cgroup_sk) {
		prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	} else if (is_sockops) {
		prog_type = BPF_PROG_TYPE_SOCK_OPS;
	} else if (is_sk_skb) {
		prog_type = BPF_PROG_TYPE_SK_SKB;
	} else if (is_sk_msg) {
		prog_type = BPF_PROG_TYPE_SK_MSG;
	} else {
		printf("Unknown event '%s'\n", event);
		return -1;
	}

	if (prog_cnt == MAX_PROGS)
		return -1;

	fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
			      bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (fd < 0) {
		printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
		return -1;
	}

	prog_fd[prog_cnt++] = fd;

	/* these program types are attached by the caller, not here */
	if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk)
		return 0;

	if (is_socket || is_sockops || is_sk_skb || is_sk_msg) {
		if (is_socket)
			event += 6;
		else
			event += 7;
		if (*event != '/')
			return 0;
		event++;
		if (!isdigit(*event)) {
			printf("invalid prog number\n");
			return -1;
		}
		return populate_prog_array(event, fd);
	}

	if (is_raw_tracepoint) {
		efd = bpf_raw_tracepoint_open(event + 15, fd);
		if (efd < 0) {
			printf("tracepoint %s %s\n", event + 15, strerror(errno));
			return -1;
		}
		event_fd[prog_cnt - 1] = efd;
		return 0;
	}

	if (is_kprobe || is_kretprobe) {
		bool need_normal_check = true;
		const char *event_prefix = "";

		if (is_kprobe)
			event += 7;
		else
			event += 10;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}

		if (isdigit(*event))
			return populate_prog_array(event, fd);

#ifdef __x86_64__
		/* on x86-64, syscall entry points carry a __x64_ prefix */
		if (strncmp(event, "sys_", 4) == 0) {
			snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
				is_kprobe ? 'p' : 'r', event, event);
			err = write_kprobe_events(buf);
			if (err >= 0) {
				need_normal_check = false;
				event_prefix = "__x64_";
			}
		}
#endif
		if (need_normal_check) {
			snprintf(buf, sizeof(buf), "%c:%s %s",
				is_kprobe ? 'p' : 'r', event, event);
			err = write_kprobe_events(buf);
			if (err < 0) {
				printf("failed to create kprobe '%s' error '%s'\n",
				       event, strerror(errno));
				return -1;
			}
		}

		strcpy(buf, DEBUGFS);
		strcat(buf, "events/kprobes/");
		strcat(buf, event_prefix);
		strcat(buf, event);
		strcat(buf, "/id");
	} else if (is_tracepoint) {
		event += 11;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}
		strcpy(buf, DEBUGFS);
		strcat(buf, "events/");
		strcat(buf, event);
		strcat(buf, "/id");
	}

	efd = open(buf, O_RDONLY, 0);
	if (efd < 0) {
		printf("failed to open event %s\n", event);
		return -1;
	}

	err = read(efd, buf, sizeof(buf));
	if (err < 0 || err >= sizeof(buf)) {
		printf("read from '%s' failed '%s'\n", event, strerror(errno));
		return -1;
	}

	close(efd);

	/* the file contains the numeric event id used by perf */
	buf[err] = 0;
	id = atoi(buf);
	attr.config = id;

	efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
	if (efd < 0) {
		printf("event %d fd %d err %s\n", id, efd, strerror(errno));
		return -1;
	}
	event_fd[prog_cnt - 1] = efd;
	err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
		       strerror(errno));
		return -1;
	}
	err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
		       strerror(errno));
		return -1;
	}

	return 0;
}

static int load_maps(struct bpf_map_data *maps, int nr_maps,
		     fixup_map_cb fixup_map)
{
	int i, numa_node;

	for (i = 0; i < nr_maps; i++) {
		if (fixup_map) {
			fixup_map(&maps[i], i);
			/* Allow userspace to assign map FD prior to creation */
			if (maps[i].fd != -1) {
				map_fd[i] = maps[i].fd;
				continue;
			}
		}

		numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ?
			maps[i].def.numa_node : -1;

		if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		    maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
			int inner_map_fd = map_fd[maps[i].def.inner_map_idx];

			map_fd[i] = bpf_create_map_in_map_node(maps[i].def.type,
							maps[i].name,
							maps[i].def.key_size,
							inner_map_fd,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		} else {
			map_fd[i] = bpf_create_map_node(maps[i].def.type,
							maps[i].name,
							maps[i].def.key_size,
							maps[i].def.value_size,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		}
		if (map_fd[i] < 0) {
			printf("failed to create a map: %d %s\n",
			       errno, strerror(errno));
			return 1;
		}
		maps[i].fd = map_fd[i];

		if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY)
			prog_array_fd = map_fd[i];
	}
	return 0;
}

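/*
 * The definitions consumed here come from a "maps" ELF section emitted by
 * the BPF program.  A minimal sketch of such a definition (hypothetical
 * map; samples typically declare struct bpf_map_def from bpf_helpers.h,
 * which this loader reads as struct bpf_load_map_def):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type = BPF_MAP_TYPE_HASH,
 *		.key_size = sizeof(u32),
 *		.value_size = sizeof(long),
 *		.max_entries = 1024,
 *	};
 */
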
static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
		   GElf_Shdr *shdr, Elf_Data **data)
{
	Elf_Scn *scn;

	scn = elf_getscn(elf, i);
	if (!scn)
		return 1;

	if (gelf_getshdr(scn, shdr) != shdr)
		return 2;

	*shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
	if (!*shname || !shdr->sh_size)
		return 3;

	*data = elf_getdata(scn, 0);
	if (!*data || elf_getdata(scn, *data) != NULL)
		return 4;

	return 0;
}

*data
, Elf_Data
*symbols
,
341 GElf_Shdr
*shdr
, struct bpf_insn
*insn
,
342 struct bpf_map_data
*maps
, int nr_maps
)
346 nrels
= shdr
->sh_size
/ shdr
->sh_entsize
;
348 for (i
= 0; i
< nrels
; i
++) {
351 unsigned int insn_idx
;
355 gelf_getrel(data
, i
, &rel
);
357 insn_idx
= rel
.r_offset
/ sizeof(struct bpf_insn
);
359 gelf_getsym(symbols
, GELF_R_SYM(rel
.r_info
), &sym
);
361 if (insn
[insn_idx
].code
!= (BPF_LD
| BPF_IMM
| BPF_DW
)) {
362 printf("invalid relo for insn[%d].code 0x%x\n",
363 insn_idx
, insn
[insn_idx
].code
);
366 insn
[insn_idx
].src_reg
= BPF_PSEUDO_MAP_FD
;
368 /* Match FD relocation against recorded map_data[] offset */
369 for (map_idx
= 0; map_idx
< nr_maps
; map_idx
++) {
370 if (maps
[map_idx
].elf_offset
== sym
.st_value
) {
376 insn
[insn_idx
].imm
= maps
[map_idx
].fd
;
378 printf("invalid relo for insn[%d] no map_data match\n",
static int cmp_symbols(const void *l, const void *r)
{
	const GElf_Sym *lsym = (const GElf_Sym *)l;
	const GElf_Sym *rsym = (const GElf_Sym *)r;

	if (lsym->st_value < rsym->st_value)
		return -1;
	else if (lsym->st_value > rsym->st_value)
		return 1;
	else
		return 0;
}

static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,
				 Elf *elf, Elf_Data *symbols, int strtabidx)
{
	int map_sz_elf, map_sz_copy;
	bool validate_zero = false;
	Elf_Data *data_maps;
	int i, nr_maps;
	GElf_Sym *sym;
	Elf_Scn *scn;

	if (maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	/* Get data for maps section via elf index */
	scn = elf_getscn(elf, maps_shndx);
	if (scn)
		data_maps = elf_getdata(scn, NULL);
	if (!scn || !data_maps) {
		printf("Failed to get Elf_Data from maps section %d\n",
		       maps_shndx);
		return -EINVAL;
	}

	/* For each map get corresponding symbol table entry */
	sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym));
	for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		assert(nr_maps < MAX_MAPS+1);
		if (!gelf_getsym(symbols, i, &sym[nr_maps]))
			continue;
		if (sym[nr_maps].st_shndx != maps_shndx)
			continue;
		/* Only increment if symbol is from the maps section */
		nr_maps++;
	}

	/* Align to map_fd[] order, via sort on offset in sym.st_value */
	qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols);

	/* Keeping compatible with ELF maps section changes
	 * ------------------------------------------------
	 * The size of struct bpf_load_map_def compiled into this loader
	 * is known, but the struct stored in the ELF file can differ.
	 *
	 * Unfortunately sym[i].st_size is zero.  To calculate the
	 * struct size stored in the ELF file, assume all map definitions
	 * have the same size, and simply divide the section size by the
	 * number of map symbols.
	 */
	map_sz_elf = data_maps->d_size / nr_maps;
	map_sz_copy = sizeof(struct bpf_load_map_def);
	if (map_sz_elf < map_sz_copy) {
		/*
		 * Backward compat, loading older ELF file with
		 * smaller struct, keeping remaining bytes zero.
		 */
		map_sz_copy = map_sz_elf;
	} else if (map_sz_elf > map_sz_copy) {
		/*
		 * Forward compat, loading newer ELF file with larger
		 * struct with unknown features. Assume zero means
		 * feature not used.  Thus, validate rest of struct
		 * data is zero.
		 */
		validate_zero = true;
	}

	/* Memcpy relevant part of ELF maps data to loader maps */
	for (i = 0; i < nr_maps; i++) {
		struct bpf_load_map_def *def;
		unsigned char *addr, *end;
		const char *map_name;
		size_t offset;

		map_name = elf_strptr(elf, strtabidx, sym[i].st_name);
		maps[i].name = strdup(map_name);
		if (!maps[i].name) {
			printf("strdup(%s): %s(%d)\n", map_name,
			       strerror(errno), errno);
			free(sym);
			return -errno;
		}

		/* Symbol value is offset into ELF maps section data area */
		offset = sym[i].st_value;
		def = (struct bpf_load_map_def *)(data_maps->d_buf + offset);
		maps[i].elf_offset = offset;
		memset(&maps[i].def, 0, sizeof(struct bpf_load_map_def));
		memcpy(&maps[i].def, def, map_sz_copy);

		/* Verify no newer features were requested */
		if (validate_zero) {
			addr = (unsigned char *) def + map_sz_copy;
			end  = (unsigned char *) def + map_sz_elf;
			for (; addr < end; addr++) {
				if (*addr != 0) {
					free(sym);
					return -EFBIG;
				}
			}
		}
	}

	free(sym);
	return nr_maps;
}

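/*
 * Worked example of the size handling above (illustrative numbers only):
 * if this loader's struct bpf_load_map_def is 40 bytes but the ELF "maps"
 * section holds 2 maps in 96 bytes, then map_sz_elf = 48.  Each map is
 * copied with map_sz_copy = 40, and the trailing 8 bytes per map must be
 * zero; otherwise the file requested a feature this loader predates and
 * -EFBIG is returned.
 */
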
static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
{
	int fd, i, ret, maps_shndx = -1, strtabidx = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr, shdr_prog;
	Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
	char *shname, *shname_prog;
	int nr_maps = 0;

	/* reset global variables */
	kern_version = 0;
	memset(license, 0, sizeof(license));
	memset(processed_sec, 0, sizeof(processed_sec));

	if (elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(path, O_RDONLY, 0);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		return 1;

	if (gelf_getehdr(elf, &ehdr) != &ehdr)
		return 1;

	/* clear all kprobes */
	i = write_kprobe_events("");

	/* scan over all elf sections to get license and map info */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (0) /* helpful for llvm debugging */
			printf("section %d:%s data %p size %zd link %d flags %d\n",
			       i, shname, data->d_buf, data->d_size,
			       shdr.sh_link, (int) shdr.sh_flags);

		if (strcmp(shname, "license") == 0) {
			processed_sec[i] = true;
			memcpy(license, data->d_buf, data->d_size);
		} else if (strcmp(shname, "version") == 0) {
			processed_sec[i] = true;
			if (data->d_size != sizeof(int)) {
				printf("invalid size of version section %zd\n",
				       data->d_size);
				return 1;
			}
			memcpy(&kern_version, data->d_buf, sizeof(int));
		} else if (strcmp(shname, "maps") == 0) {
			int j;

			maps_shndx = i;
			data_maps = data;
			for (j = 0; j < MAX_MAPS; j++)
				map_data[j].fd = -1;
		} else if (shdr.sh_type == SHT_SYMTAB) {
			strtabidx = shdr.sh_link;
			symbols = data;
		}
	}

580 printf("missing SHT_SYMTAB section\n");
585 nr_maps
= load_elf_maps_section(map_data
, maps_shndx
,
586 elf
, symbols
, strtabidx
);
588 printf("Error: Failed loading ELF maps (errno:%d):%s\n",
589 nr_maps
, strerror(-nr_maps
));
592 if (load_maps(map_data
, nr_maps
, fixup_map
))
594 map_data_count
= nr_maps
;
596 processed_sec
[maps_shndx
] = true;
	/* process all relo sections, and rewrite bpf insns for maps */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (shdr.sh_type == SHT_REL) {
			struct bpf_insn *insns;

			/* locate prog sec that need map fixup (relocations) */
			if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
				    &shdr_prog, &data_prog))
				continue;

			if (shdr_prog.sh_type != SHT_PROGBITS ||
			    !(shdr_prog.sh_flags & SHF_EXECINSTR))
				continue;

			insns = (struct bpf_insn *) data_prog->d_buf;
			processed_sec[i] = true; /* relo section */

			if (parse_relo_and_apply(data, symbols, &shdr, insns,
						 map_data, nr_maps))
				continue;
		}
	}

	/* load programs */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (memcmp(shname, "kprobe/", 7) == 0 ||
		    memcmp(shname, "kretprobe/", 10) == 0 ||
		    memcmp(shname, "tracepoint/", 11) == 0 ||
		    memcmp(shname, "raw_tracepoint/", 15) == 0 ||
		    memcmp(shname, "xdp", 3) == 0 ||
		    memcmp(shname, "perf_event", 10) == 0 ||
		    memcmp(shname, "socket", 6) == 0 ||
		    memcmp(shname, "cgroup/", 7) == 0 ||
		    memcmp(shname, "sockops", 7) == 0 ||
		    memcmp(shname, "sk_skb", 6) == 0 ||
		    memcmp(shname, "sk_msg", 6) == 0) {
			ret = load_and_attach(shname, data->d_buf,
					      data->d_size);
			if (ret != 0)
				goto done;
		}
	}

done:
	close(fd);
	return ret;
}

int load_bpf_file(char *path)
{
	return do_load_bpf_file(path, NULL);
}

int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
{
	return do_load_bpf_file(path, fixup_map);
}

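/*
 * Typical caller flow, as a minimal sketch (hypothetical object file
 * name, error handling trimmed):
 *
 *	if (load_bpf_file("my_prog_kern.o")) {
 *		printf("%s", bpf_log_buf);	// verifier log on failure
 *		return 1;
 *	}
 *	// programs are loaded and attached; map fds are in map_fd[]
 *	read_trace_pipe();
 */
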
void read_trace_pipe(void)
{
	int trace_fd;

	trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
	if (trace_fd < 0)
		return;

	while (1) {
		static char buf[4096];
		ssize_t sz;

		sz = read(trace_fd, buf, sizeof(buf) - 1);
		if (sz > 0) {
			buf[sz] = 0;
			puts(buf);
		}
	}
}

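/*
 * read_trace_pipe() blocks forever, echoing the ftrace pipe.  Output
 * written by BPF programs via bpf_trace_printk() shows up here; a sketch
 * of a producing program (hypothetical, not part of this file):
 *
 *	char fmt[] = "hello from bpf\n";
 *	bpf_trace_printk(fmt, sizeof(fmt));
 */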