12 #include <linux/bpf.h>
13 #include <linux/filter.h>
14 #include <linux/perf_event.h>
15 #include <sys/syscall.h>
16 #include <sys/ioctl.h>
21 #include "bpf_helpers.h"
24 #define DEBUGFS "/sys/kernel/debug/tracing/"
/* License string copied verbatim from the ELF "license" section; passed to
 * bpf_prog_load() so the kernel can enforce GPL-only helper restrictions. */
static char license[128];
/* Kernel version from the "version" section (required for kprobe programs). */
static int kern_version;
/* Marks ELF section indices already consumed by one of the loader passes. */
static bool processed_sec[128];
/* fds of all successfully loaded programs, in load order. */
int prog_fd[MAX_PROGS];
/* perf event fds paired with kprobe/tracepoint programs (see load_and_attach). */
int event_fd[MAX_PROGS];
/* fd of the BPF_MAP_TYPE_PROG_ARRAY map if one was created; -1 otherwise. */
int prog_array_fd = -1;
35 static int populate_prog_array(const char *event
, int prog_fd
)
37 int ind
= atoi(event
), err
;
39 err
= bpf_update_elem(prog_array_fd
, &ind
, &prog_fd
, BPF_ANY
);
41 printf("failed to store prog_fd in prog_array\n");
47 static int load_and_attach(const char *event
, struct bpf_insn
*prog
, int size
)
49 bool is_socket
= strncmp(event
, "socket", 6) == 0;
50 bool is_kprobe
= strncmp(event
, "kprobe/", 7) == 0;
51 bool is_kretprobe
= strncmp(event
, "kretprobe/", 10) == 0;
52 bool is_tracepoint
= strncmp(event
, "tracepoint/", 11) == 0;
53 bool is_xdp
= strncmp(event
, "xdp", 3) == 0;
54 bool is_perf_event
= strncmp(event
, "perf_event", 10) == 0;
55 enum bpf_prog_type prog_type
;
58 struct perf_event_attr attr
= {};
60 attr
.type
= PERF_TYPE_TRACEPOINT
;
61 attr
.sample_type
= PERF_SAMPLE_RAW
;
62 attr
.sample_period
= 1;
63 attr
.wakeup_events
= 1;
66 prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
;
67 } else if (is_kprobe
|| is_kretprobe
) {
68 prog_type
= BPF_PROG_TYPE_KPROBE
;
69 } else if (is_tracepoint
) {
70 prog_type
= BPF_PROG_TYPE_TRACEPOINT
;
72 prog_type
= BPF_PROG_TYPE_XDP
;
73 } else if (is_perf_event
) {
74 prog_type
= BPF_PROG_TYPE_PERF_EVENT
;
76 printf("Unknown event '%s'\n", event
);
80 fd
= bpf_prog_load(prog_type
, prog
, size
, license
, kern_version
);
82 printf("bpf_prog_load() err=%d\n%s", errno
, bpf_log_buf
);
86 prog_fd
[prog_cnt
++] = fd
;
88 if (is_xdp
|| is_perf_event
)
96 if (!isdigit(*event
)) {
97 printf("invalid prog number\n");
100 return populate_prog_array(event
, fd
);
103 if (is_kprobe
|| is_kretprobe
) {
110 printf("event name cannot be empty\n");
115 return populate_prog_array(event
, fd
);
117 snprintf(buf
, sizeof(buf
),
118 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
119 is_kprobe
? 'p' : 'r', event
, event
);
122 printf("failed to create kprobe '%s' error '%s'\n",
123 event
, strerror(errno
));
127 strcpy(buf
, DEBUGFS
);
128 strcat(buf
, "events/kprobes/");
131 } else if (is_tracepoint
) {
135 printf("event name cannot be empty\n");
138 strcpy(buf
, DEBUGFS
);
139 strcat(buf
, "events/");
144 efd
= open(buf
, O_RDONLY
, 0);
146 printf("failed to open event %s\n", event
);
150 err
= read(efd
, buf
, sizeof(buf
));
151 if (err
< 0 || err
>= sizeof(buf
)) {
152 printf("read from '%s' failed '%s'\n", event
, strerror(errno
));
162 efd
= perf_event_open(&attr
, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
164 printf("event %d fd %d err %s\n", id
, efd
, strerror(errno
));
167 event_fd
[prog_cnt
- 1] = efd
;
168 ioctl(efd
, PERF_EVENT_IOC_ENABLE
, 0);
169 ioctl(efd
, PERF_EVENT_IOC_SET_BPF
, fd
);
174 static int load_maps(struct bpf_map_def
*maps
, int len
)
178 for (i
= 0; i
< len
/ sizeof(struct bpf_map_def
); i
++) {
180 map_fd
[i
] = bpf_create_map(maps
[i
].type
,
186 printf("failed to create a map: %d %s\n",
187 errno
, strerror(errno
));
191 if (maps
[i
].type
== BPF_MAP_TYPE_PROG_ARRAY
)
192 prog_array_fd
= map_fd
[i
];
197 static int get_sec(Elf
*elf
, int i
, GElf_Ehdr
*ehdr
, char **shname
,
198 GElf_Shdr
*shdr
, Elf_Data
**data
)
202 scn
= elf_getscn(elf
, i
);
206 if (gelf_getshdr(scn
, shdr
) != shdr
)
209 *shname
= elf_strptr(elf
, ehdr
->e_shstrndx
, shdr
->sh_name
);
210 if (!*shname
|| !shdr
->sh_size
)
213 *data
= elf_getdata(scn
, 0);
214 if (!*data
|| elf_getdata(scn
, *data
) != NULL
)
220 static int parse_relo_and_apply(Elf_Data
*data
, Elf_Data
*symbols
,
221 GElf_Shdr
*shdr
, struct bpf_insn
*insn
)
225 nrels
= shdr
->sh_size
/ shdr
->sh_entsize
;
227 for (i
= 0; i
< nrels
; i
++) {
230 unsigned int insn_idx
;
232 gelf_getrel(data
, i
, &rel
);
234 insn_idx
= rel
.r_offset
/ sizeof(struct bpf_insn
);
236 gelf_getsym(symbols
, GELF_R_SYM(rel
.r_info
), &sym
);
238 if (insn
[insn_idx
].code
!= (BPF_LD
| BPF_IMM
| BPF_DW
)) {
239 printf("invalid relo for insn[%d].code 0x%x\n",
240 insn_idx
, insn
[insn_idx
].code
);
243 insn
[insn_idx
].src_reg
= BPF_PSEUDO_MAP_FD
;
244 insn
[insn_idx
].imm
= map_fd
[sym
.st_value
/ sizeof(struct bpf_map_def
)];
250 int load_bpf_file(char *path
)
255 GElf_Shdr shdr
, shdr_prog
;
256 Elf_Data
*data
, *data_prog
, *symbols
= NULL
;
257 char *shname
, *shname_prog
;
259 if (elf_version(EV_CURRENT
) == EV_NONE
)
262 fd
= open(path
, O_RDONLY
, 0);
266 elf
= elf_begin(fd
, ELF_C_READ
, NULL
);
271 if (gelf_getehdr(elf
, &ehdr
) != &ehdr
)
274 /* clear all kprobes */
275 i
= system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
277 /* scan over all elf sections to get license and map info */
278 for (i
= 1; i
< ehdr
.e_shnum
; i
++) {
280 if (get_sec(elf
, i
, &ehdr
, &shname
, &shdr
, &data
))
283 if (0) /* helpful for llvm debugging */
284 printf("section %d:%s data %p size %zd link %d flags %d\n",
285 i
, shname
, data
->d_buf
, data
->d_size
,
286 shdr
.sh_link
, (int) shdr
.sh_flags
);
288 if (strcmp(shname
, "license") == 0) {
289 processed_sec
[i
] = true;
290 memcpy(license
, data
->d_buf
, data
->d_size
);
291 } else if (strcmp(shname
, "version") == 0) {
292 processed_sec
[i
] = true;
293 if (data
->d_size
!= sizeof(int)) {
294 printf("invalid size of version section %zd\n",
298 memcpy(&kern_version
, data
->d_buf
, sizeof(int));
299 } else if (strcmp(shname
, "maps") == 0) {
300 processed_sec
[i
] = true;
301 if (load_maps(data
->d_buf
, data
->d_size
))
303 } else if (shdr
.sh_type
== SHT_SYMTAB
) {
308 /* load programs that need map fixup (relocations) */
309 for (i
= 1; i
< ehdr
.e_shnum
; i
++) {
311 if (get_sec(elf
, i
, &ehdr
, &shname
, &shdr
, &data
))
313 if (shdr
.sh_type
== SHT_REL
) {
314 struct bpf_insn
*insns
;
316 if (get_sec(elf
, shdr
.sh_info
, &ehdr
, &shname_prog
,
317 &shdr_prog
, &data_prog
))
320 insns
= (struct bpf_insn
*) data_prog
->d_buf
;
322 processed_sec
[shdr
.sh_info
] = true;
323 processed_sec
[i
] = true;
325 if (parse_relo_and_apply(data
, symbols
, &shdr
, insns
))
328 if (memcmp(shname_prog
, "kprobe/", 7) == 0 ||
329 memcmp(shname_prog
, "kretprobe/", 10) == 0 ||
330 memcmp(shname_prog
, "tracepoint/", 11) == 0 ||
331 memcmp(shname_prog
, "xdp", 3) == 0 ||
332 memcmp(shname_prog
, "perf_event", 10) == 0 ||
333 memcmp(shname_prog
, "socket", 6) == 0)
334 load_and_attach(shname_prog
, insns
, data_prog
->d_size
);
338 /* load programs that don't use maps */
339 for (i
= 1; i
< ehdr
.e_shnum
; i
++) {
341 if (processed_sec
[i
])
344 if (get_sec(elf
, i
, &ehdr
, &shname
, &shdr
, &data
))
347 if (memcmp(shname
, "kprobe/", 7) == 0 ||
348 memcmp(shname
, "kretprobe/", 10) == 0 ||
349 memcmp(shname
, "tracepoint/", 11) == 0 ||
350 memcmp(shname
, "xdp", 3) == 0 ||
351 memcmp(shname
, "perf_event", 10) == 0 ||
352 memcmp(shname
, "socket", 6) == 0)
353 load_and_attach(shname
, data
->d_buf
, data
->d_size
);
360 void read_trace_pipe(void)
364 trace_fd
= open(DEBUGFS
"trace_pipe", O_RDONLY
, 0);
369 static char buf
[4096];
372 sz
= read(trace_fd
, buf
, sizeof(buf
));
/* Upper bound on /proc/kallsyms entries the loader keeps in memory. */
#define MAX_SYMS 300000
/* Kernel symbol table loaded by load_kallsyms(); sorted by address so
 * ksym_search() can binary-search it. */
static struct ksym syms[MAX_SYMS];
384 static int ksym_cmp(const void *p1
, const void *p2
)
386 return ((struct ksym
*)p1
)->addr
- ((struct ksym
*)p2
)->addr
;
389 int load_kallsyms(void)
391 FILE *f
= fopen("/proc/kallsyms", "r");
392 char func
[256], buf
[256];
401 if (!fgets(buf
, sizeof(buf
), f
))
403 if (sscanf(buf
, "%p %c %s", &addr
, &symbol
, func
) != 3)
407 syms
[i
].addr
= (long) addr
;
408 syms
[i
].name
= strdup(func
);
412 qsort(syms
, sym_cnt
, sizeof(struct ksym
), ksym_cmp
);
416 struct ksym
*ksym_search(long key
)
418 int start
= 0, end
= sym_cnt
;
421 while (start
< end
) {
422 size_t mid
= start
+ (end
- start
) / 2;
424 result
= key
- syms
[mid
].addr
;
433 if (start
>= 1 && syms
[start
- 1].addr
< key
&&
434 key
< syms
[start
].addr
)
436 return &syms
[start
- 1];
438 /* out of range. return _stext */