#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <libelf.h>
#include <gelf.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include "libbpf.h"	/* local sample header, assumed to provide bpf_prog_load(),
			 * bpf_create_map(), bpf_log_buf and the perf_event_open() wrapper */
#include "bpf_helpers.h"
#include "bpf_load.h"	/* local sample header, assumed to provide MAX_MAPS, MAX_PROGS
			 * and struct bpf_map_def */
#define DEBUGFS "/sys/kernel/debug/tracing/"
static char license[128];
static int kern_version;
static bool processed_sec[128];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
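
/*
 * Load one BPF program and attach it to its event: "kprobe/..." and
 * "kretprobe/..." programs get a kprobe created through the debugfs
 * kprobe_events interface and are attached with perf_event_open() +
 * PERF_EVENT_IOC_SET_BPF; "socket" programs are only loaded and their
 * fd is left in prog_fd[] for the caller.
 */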
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
	bool is_socket = strncmp(event, "socket", 6) == 0;
	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
	enum bpf_prog_type prog_type;
	char buf[256];
	int fd, efd, err, id;
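
	/* perf event attributes used when attaching to the kprobe tracepoint */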
	struct perf_event_attr attr = {};

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	if (is_socket) {
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	} else if (is_kprobe || is_kretprobe) {
		prog_type = BPF_PROG_TYPE_KPROBE;
	} else {
		printf("Unknown event '%s'\n", event);
		return -1;
	}
	if (is_kprobe || is_kretprobe) {
		/* strip the "kprobe/" or "kretprobe/" prefix so only the
		 * probed function name is passed to kprobe_events
		 */
		if (is_kprobe)
			event += 7;
		else
			event += 10;

		snprintf(buf, sizeof(buf),
			 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
			 is_kprobe ? 'p' : 'r', event, event);
		err = system(buf);
		if (err < 0) {
			printf("failed to create kprobe '%s' error '%s'\n",
			       event, strerror(errno));
			return -1;
		}
	}
	fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
	if (fd < 0) {
		printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
		return -1;
	}

	prog_fd[prog_cnt++] = fd;

	if (is_socket)
		return 0;
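
	/* read the id of the new kprobe's tracepoint from debugfs */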
	strcpy(buf, DEBUGFS);
	strcat(buf, "events/kprobes/");
	strcat(buf, event);
	strcat(buf, "/id");

	efd = open(buf, O_RDONLY, 0);
	if (efd < 0) {
		printf("failed to open event %s\n", event);
		return -1;
	}
	err = read(efd, buf, sizeof(buf));
	if (err < 0 || err >= sizeof(buf)) {
		printf("read from '%s' failed '%s'\n", event, strerror(errno));
		return -1;
	}
	close(efd);

	buf[err] = 0;
	id = atoi(buf);
	attr.config = id;
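
	/* open a perf event for that tracepoint and attach the BPF program to it */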
	efd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
	if (efd < 0) {
		printf("event %d fd %d err %s\n", id, efd, strerror(errno));
		return -1;
	}
	event_fd[prog_cnt - 1] = efd;
	ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
	ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);

	return 0;
}
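
/*
 * Create a kernel map for every entry of the ELF "maps" section and
 * remember the resulting file descriptors in map_fd[], so the
 * relocation pass can patch them into the program instructions.
 */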
static int load_maps(struct bpf_map_def *maps, int len)
{
	int i;

	for (i = 0; i < len / sizeof(struct bpf_map_def); i++) {
		map_fd[i] = bpf_create_map(maps[i].type,
					   maps[i].key_size,
					   maps[i].value_size,
					   maps[i].max_entries);
		if (map_fd[i] < 0)
			return 1;
	}
	return 0;
}
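
/*
 * Look up ELF section number 'i': return its name, section header and
 * data, or non-zero if the section is missing or empty.
 */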
static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
		   GElf_Shdr *shdr, Elf_Data **data)
{
	Elf_Scn *scn;

	scn = elf_getscn(elf, i);
	if (!scn)
		return 1;

	if (gelf_getshdr(scn, shdr) != shdr)
		return 2;

	*shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
	if (!*shname || !shdr->sh_size)
		return 3;

	*data = elf_getdata(scn, 0);
	if (!*data || elf_getdata(scn, *data) != NULL)
		return 4;

	return 0;
}
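
/*
 * Walk the relocation records of a program section and rewrite each
 * relocated ld_imm64 instruction so that it carries BPF_PSEUDO_MAP_FD
 * and the file descriptor of the corresponding map.
 */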
static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
				GElf_Shdr *shdr, struct bpf_insn *insn)
{
	int i, nrels;

	nrels = shdr->sh_size / shdr->sh_entsize;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;

		gelf_getrel(data, i, &rel);

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);

		gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);

		if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			printf("invalid relo for insn[%d].code 0x%x\n",
			       insn_idx, insn[insn_idx].code);
			return 1;
		}
		insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
		insn[insn_idx].imm = map_fd[sym.st_value / sizeof(struct bpf_map_def)];
	}

	return 0;
}
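
/*
 * Parse an ELF object built by LLVM: pick up license, kernel version
 * and map definitions, apply map relocations, then load and attach all
 * "socket", "kprobe/" and "kretprobe/" program sections.
 */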
int load_bpf_file(char *path)
{
	int fd, i;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr, shdr_prog;
	Elf_Data *data, *data_prog, *symbols = NULL;
	char *shname, *shname_prog;

	if (elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(path, O_RDONLY, 0);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		return 1;

	if (gelf_getehdr(elf, &ehdr) != &ehdr)
		return 1;
	/* clear all kprobes */
	i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");

	/* scan over all elf sections to get license and map info */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (0) /* helpful for llvm debugging */
			printf("section %d:%s data %p size %zd link %d flags %d\n",
			       i, shname, data->d_buf, data->d_size,
			       shdr.sh_link, (int) shdr.sh_flags);

		if (strcmp(shname, "license") == 0) {
			processed_sec[i] = true;
			memcpy(license, data->d_buf, data->d_size);
		} else if (strcmp(shname, "version") == 0) {
			processed_sec[i] = true;
			if (data->d_size != sizeof(int)) {
				printf("invalid size of version section %zd\n",
				       data->d_size);
				return 1;
			}
			memcpy(&kern_version, data->d_buf, sizeof(int));
		} else if (strcmp(shname, "maps") == 0) {
			processed_sec[i] = true;
			if (load_maps(data->d_buf, data->d_size))
				return 1;
		} else if (shdr.sh_type == SHT_SYMTAB) {
			symbols = data;
		}
	}
	/* load programs that need map fixup (relocations) */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;
		if (shdr.sh_type == SHT_REL) {
			struct bpf_insn *insns;

			if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
				    &shdr_prog, &data_prog))
				continue;

			insns = (struct bpf_insn *) data_prog->d_buf;

			processed_sec[shdr.sh_info] = true;
			processed_sec[i] = true;

			if (parse_relo_and_apply(data, symbols, &shdr, insns))
				continue;

			if (memcmp(shname_prog, "kprobe/", 7) == 0 ||
			    memcmp(shname_prog, "kretprobe/", 10) == 0 ||
			    memcmp(shname_prog, "socket", 6) == 0)
				load_and_attach(shname_prog, insns, data_prog->d_size);
		}
	}
	/* load programs that don't use maps */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (memcmp(shname, "kprobe/", 7) == 0 ||
		    memcmp(shname, "kretprobe/", 10) == 0 ||
		    memcmp(shname, "socket", 6) == 0)
			load_and_attach(shname, data->d_buf, data->d_size);
	}

	close(fd);
	return 0;
}
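
/*
 * Dump the ftrace trace_pipe to stdout forever; the samples use this to
 * show bpf_trace_printk() output from the loaded programs.
 */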
void read_trace_pipe(void)
{
	int trace_fd;

	trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
	if (trace_fd < 0)
		return;

	while (1) {
		static char buf[4096];
		ssize_t sz;

		/* leave room for the terminating NUL */
		sz = read(trace_fd, buf, sizeof(buf) - 1);
		if (sz > 0) {
			buf[sz] = 0;
			puts(buf);
		}
	}
}