stmmac: review RX/TX ring management
[linux/fpc-iii.git] / samples / bpf / bpf_load.c
blobda86a8e0a95afb0fcc79f96d310115803b5a9d55
1 #include <stdio.h>
2 #include <sys/types.h>
3 #include <sys/stat.h>
4 #include <fcntl.h>
5 #include <libelf.h>
6 #include <gelf.h>
7 #include <errno.h>
8 #include <unistd.h>
9 #include <string.h>
10 #include <stdbool.h>
11 #include <stdlib.h>
12 #include <linux/bpf.h>
13 #include <linux/filter.h>
14 #include <linux/perf_event.h>
15 #include <sys/syscall.h>
16 #include <sys/ioctl.h>
17 #include <sys/mman.h>
18 #include <poll.h>
19 #include <ctype.h>
20 #include "libbpf.h"
21 #include "bpf_helpers.h"
22 #include "bpf_load.h"
/* debugfs mount point used for kprobe creation and the trace pipe */
#define DEBUGFS "/sys/kernel/debug/tracing/"

static char license[128];       /* license string copied from the "license" ELF section */
static int kern_version;        /* kernel version read from the "version" ELF section */
static bool processed_sec[128]; /* marks ELF sections already consumed by load_bpf_file() */
int map_fd[MAX_MAPS];           /* fds of maps created from the "maps" section */
int prog_fd[MAX_PROGS];         /* fds of successfully loaded BPF programs */
int event_fd[MAX_PROGS];        /* perf event fds for attached [k,kret]probes */
int prog_cnt;                   /* number of entries used in prog_fd[] / event_fd[] */
int prog_array_fd = -1;         /* fd of the BPF_MAP_TYPE_PROG_ARRAY map, -1 if none */
35 static int populate_prog_array(const char *event, int prog_fd)
37 int ind = atoi(event), err;
39 err = bpf_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
40 if (err < 0) {
41 printf("failed to store prog_fd in prog_array\n");
42 return -1;
44 return 0;
47 static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
49 bool is_socket = strncmp(event, "socket", 6) == 0;
50 bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
51 bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
52 enum bpf_prog_type prog_type;
53 char buf[256];
54 int fd, efd, err, id;
55 struct perf_event_attr attr = {};
57 attr.type = PERF_TYPE_TRACEPOINT;
58 attr.sample_type = PERF_SAMPLE_RAW;
59 attr.sample_period = 1;
60 attr.wakeup_events = 1;
62 if (is_socket) {
63 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
64 } else if (is_kprobe || is_kretprobe) {
65 prog_type = BPF_PROG_TYPE_KPROBE;
66 } else {
67 printf("Unknown event '%s'\n", event);
68 return -1;
71 fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
72 if (fd < 0) {
73 printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
74 return -1;
77 prog_fd[prog_cnt++] = fd;
79 if (is_socket) {
80 event += 6;
81 if (*event != '/')
82 return 0;
83 event++;
84 if (!isdigit(*event)) {
85 printf("invalid prog number\n");
86 return -1;
88 return populate_prog_array(event, fd);
91 if (is_kprobe || is_kretprobe) {
92 if (is_kprobe)
93 event += 7;
94 else
95 event += 10;
97 if (*event == 0) {
98 printf("event name cannot be empty\n");
99 return -1;
102 if (isdigit(*event))
103 return populate_prog_array(event, fd);
105 snprintf(buf, sizeof(buf),
106 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
107 is_kprobe ? 'p' : 'r', event, event);
108 err = system(buf);
109 if (err < 0) {
110 printf("failed to create kprobe '%s' error '%s'\n",
111 event, strerror(errno));
112 return -1;
116 strcpy(buf, DEBUGFS);
117 strcat(buf, "events/kprobes/");
118 strcat(buf, event);
119 strcat(buf, "/id");
121 efd = open(buf, O_RDONLY, 0);
122 if (efd < 0) {
123 printf("failed to open event %s\n", event);
124 return -1;
127 err = read(efd, buf, sizeof(buf));
128 if (err < 0 || err >= sizeof(buf)) {
129 printf("read from '%s' failed '%s'\n", event, strerror(errno));
130 return -1;
133 close(efd);
135 buf[err] = 0;
136 id = atoi(buf);
137 attr.config = id;
139 efd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
140 if (efd < 0) {
141 printf("event %d fd %d err %s\n", id, efd, strerror(errno));
142 return -1;
144 event_fd[prog_cnt - 1] = efd;
145 ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
146 ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
148 return 0;
151 static int load_maps(struct bpf_map_def *maps, int len)
153 int i;
155 for (i = 0; i < len / sizeof(struct bpf_map_def); i++) {
157 map_fd[i] = bpf_create_map(maps[i].type,
158 maps[i].key_size,
159 maps[i].value_size,
160 maps[i].max_entries);
161 if (map_fd[i] < 0)
162 return 1;
164 if (maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
165 prog_array_fd = map_fd[i];
167 return 0;
170 static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
171 GElf_Shdr *shdr, Elf_Data **data)
173 Elf_Scn *scn;
175 scn = elf_getscn(elf, i);
176 if (!scn)
177 return 1;
179 if (gelf_getshdr(scn, shdr) != shdr)
180 return 2;
182 *shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
183 if (!*shname || !shdr->sh_size)
184 return 3;
186 *data = elf_getdata(scn, 0);
187 if (!*data || elf_getdata(scn, *data) != NULL)
188 return 4;
190 return 0;
193 static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
194 GElf_Shdr *shdr, struct bpf_insn *insn)
196 int i, nrels;
198 nrels = shdr->sh_size / shdr->sh_entsize;
200 for (i = 0; i < nrels; i++) {
201 GElf_Sym sym;
202 GElf_Rel rel;
203 unsigned int insn_idx;
205 gelf_getrel(data, i, &rel);
207 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
209 gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);
211 if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
212 printf("invalid relo for insn[%d].code 0x%x\n",
213 insn_idx, insn[insn_idx].code);
214 return 1;
216 insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
217 insn[insn_idx].imm = map_fd[sym.st_value / sizeof(struct bpf_map_def)];
220 return 0;
223 int load_bpf_file(char *path)
225 int fd, i;
226 Elf *elf;
227 GElf_Ehdr ehdr;
228 GElf_Shdr shdr, shdr_prog;
229 Elf_Data *data, *data_prog, *symbols = NULL;
230 char *shname, *shname_prog;
232 if (elf_version(EV_CURRENT) == EV_NONE)
233 return 1;
235 fd = open(path, O_RDONLY, 0);
236 if (fd < 0)
237 return 1;
239 elf = elf_begin(fd, ELF_C_READ, NULL);
241 if (!elf)
242 return 1;
244 if (gelf_getehdr(elf, &ehdr) != &ehdr)
245 return 1;
247 /* clear all kprobes */
248 i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
250 /* scan over all elf sections to get license and map info */
251 for (i = 1; i < ehdr.e_shnum; i++) {
253 if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
254 continue;
256 if (0) /* helpful for llvm debugging */
257 printf("section %d:%s data %p size %zd link %d flags %d\n",
258 i, shname, data->d_buf, data->d_size,
259 shdr.sh_link, (int) shdr.sh_flags);
261 if (strcmp(shname, "license") == 0) {
262 processed_sec[i] = true;
263 memcpy(license, data->d_buf, data->d_size);
264 } else if (strcmp(shname, "version") == 0) {
265 processed_sec[i] = true;
266 if (data->d_size != sizeof(int)) {
267 printf("invalid size of version section %zd\n",
268 data->d_size);
269 return 1;
271 memcpy(&kern_version, data->d_buf, sizeof(int));
272 } else if (strcmp(shname, "maps") == 0) {
273 processed_sec[i] = true;
274 if (load_maps(data->d_buf, data->d_size))
275 return 1;
276 } else if (shdr.sh_type == SHT_SYMTAB) {
277 symbols = data;
281 /* load programs that need map fixup (relocations) */
282 for (i = 1; i < ehdr.e_shnum; i++) {
284 if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
285 continue;
286 if (shdr.sh_type == SHT_REL) {
287 struct bpf_insn *insns;
289 if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
290 &shdr_prog, &data_prog))
291 continue;
293 insns = (struct bpf_insn *) data_prog->d_buf;
295 processed_sec[shdr.sh_info] = true;
296 processed_sec[i] = true;
298 if (parse_relo_and_apply(data, symbols, &shdr, insns))
299 continue;
301 if (memcmp(shname_prog, "kprobe/", 7) == 0 ||
302 memcmp(shname_prog, "kretprobe/", 10) == 0 ||
303 memcmp(shname_prog, "socket", 6) == 0)
304 load_and_attach(shname_prog, insns, data_prog->d_size);
308 /* load programs that don't use maps */
309 for (i = 1; i < ehdr.e_shnum; i++) {
311 if (processed_sec[i])
312 continue;
314 if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
315 continue;
317 if (memcmp(shname, "kprobe/", 7) == 0 ||
318 memcmp(shname, "kretprobe/", 10) == 0 ||
319 memcmp(shname, "socket", 6) == 0)
320 load_and_attach(shname, data->d_buf, data->d_size);
323 close(fd);
324 return 0;
327 void read_trace_pipe(void)
329 int trace_fd;
331 trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
332 if (trace_fd < 0)
333 return;
335 while (1) {
336 static char buf[4096];
337 ssize_t sz;
339 sz = read(trace_fd, buf, sizeof(buf));
340 if (sz > 0) {
341 buf[sz] = 0;
342 puts(buf);