/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/types.h>

#include <linux/bpf.h>
#include <linux/err.h>

#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "trace_helpers.h"
static int error_cnt, pass_cnt;
static bool jit_enabled;

#define MAGIC_BYTES 123
/* ipv4 test vector */
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
/* ipv6 test vector */
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
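/* pkt_v4/pkt_v6 above are minimal Ethernet + IP{,v6} + TCP test frames; the
 * IP length fields carry MAGIC_BYTES so the l4lb tests further down can
 * cross-check the per-CPU byte counters against MAGIC_BYTES * NUM_ITER * 2.
 */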
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})
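/* CHECK() evaluates to the normalized condition, so callers can branch on
 * it, e.g. (illustrative):
 *
 *	if (CHECK(err || errno, "subtest", "err %d errno %d\n", err, errno))
 *		goto cleanup;
 */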
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}
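/* Typical call pattern throughout this file (illustrative):
 *
 *	map_fd = bpf_find_map(__func__, obj, "vip_map");
 *	if (map_fd < 0)
 *		return;
 */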
static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}
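/* bpf_prog_test_run() has the kernel execute the program 'repeat' times on
 * the supplied packet: retval is the program's return value and duration is
 * reported in nsec.  The tests only print duration; they never assert on it.
 */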
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");

	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
	bpf_object__close(obj);
}
static void test_xdp_adjust_tail(void)
{
	const char *file = "./test_adjust_tail.o";
	struct bpf_object *obj;
	__u32 duration, retval, size;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_DROP,
	      "ipv4", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 54,
	      "ipv6", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
	bpf_object__close(obj);
}
#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct real_definition {
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);

	map_fd = bpf_find_map(__func__, obj, "vip_map");

	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");

	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");

	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");

	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
	bpf_object__close(obj);
}
static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}
static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct real_definition {
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);

	map_fd = bpf_find_map(__func__, obj, "vip_map");

	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");

	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");

	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");

	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
	bpf_object__close(obj);
}
static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	CHECK(err, "", "err %d errno %d\n", err, errno);

	bpf_object__close(obj);
}
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
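/* The double cast (pointer -> unsigned long -> __u64) avoids "cast from
 * pointer to integer of different size" warnings on 32-bit builds while
 * still producing the 64-bit value the bpf(2) attribute fields expect.
 */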
static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;
	char tmpc;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}
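/* bpf_jit_enable reads back "0" (JIT off), "1" (JIT on) or "2" (JIT on with
 * debug output), so treating any first byte other than '0' as "enabled" is
 * sufficient here.
 */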
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	uid_t my_uid = getuid();
	time_t now, load_time;
	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */

		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
	/* Check bpf_prog_get_next_id() */
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)

		/*
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
			  err, errno, EFAULT))

		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);
	/* Check bpf_map_get_next_id() */
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}
static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}
static void test_obj_name(void)
{
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.max_entries = 1;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);
	}
}
static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))

	bytes = read(efd, buf, sizeof(buf));
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))

		/* check NULL prog array query */
		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != 0,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))

		/* try to get # of programs only */
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != 2,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))

		/* try a few negative tests */
		/* invalid query pointer */
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
			    (struct perf_event_query_bpf *)0x1);
		if (CHECK(!err || errno != EFAULT,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d\n", err, errno))

		/* not enough space */
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
	}

	for (; i >= 0; i--) {
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);

		bpf_object__close(obj[i]);
	}
}
static int compare_map_keys(int map1_fd, int map2_fd)
{
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];

	err = bpf_map_get_next_key(map1_fd, NULL, &key);

	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
	}
}
static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);

	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);

		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);

		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {

		next_key_p = &next_key;
	}
}
static void test_stacktrace_map(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))

	bytes = read(efd, buf, sizeof(buf));
	if (bytes <= 0 || bytes >= sizeof(buf))

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);

	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (stack_amap_fd < 0)

	/* give some time for bpf program run */

	/* disable stack trace collection */
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;

disable_pmu_noerr:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

	bpf_object__close(obj);
}
static void test_stacktrace_map_raw_tp(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int efd, err, prog_fd;
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))

	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))

	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)

	/* give some time for bpf program run */

	/* disable stack trace collection */
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))

	goto close_prog_noerr;

close_prog_noerr:
	bpf_object__close(obj);
}
static int extract_build_id(char *build_id, size_t size)
{
	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");

	if (getline(&line, &len, fp) == -1)

	memcpy(build_id, line, len);
	build_id[len] = '\0';
}
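/* extract_build_id() relies on "readelf -n ./urandom_read" printing a line
 * of the form "    Build ID: <hex>"; only the hex digits need to survive in
 * the output buffer, since the callers match against it with strstr() using
 * a hex-encoded build id reconstructed from the stack map.
 */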
static void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))

	/* Get the ID for the random/urandom_read tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))

	bytes = read(efd, buf, sizeof(buf));
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))

	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read") == 0);
	/* disable stack trace collection */
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))

	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))

	do {
		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))

		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))

	stack_trace_len = PERF_MAX_STACK_DEPTH
		* sizeof(struct bpf_stack_build_id);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
	      "err %d errno %d\n", err, errno);

	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

	bpf_object__close(obj);
}
static void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {
		.sample_freq = 5000,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open",
		  "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
		  pmu_fd, errno))

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))

	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("taskset 0x1 ./urandom_read 100000") == 0);
	/* disable stack trace collection */
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))

	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))

	do {
		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))

		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per cpu
	 * at any time, thus stack_amap here will always fall back to
	 * BPF_STACK_BUILD_ID_IP;
	 */

	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

	bpf_object__close(obj);
}
#define MAX_CNT_RAWTP	10ull
#define MAX_STACK_RAWTP	100
struct get_stack_trace_t {
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
, int size
)
1422 bool good_kern_stack
= false, good_user_stack
= false;
1423 const char *nonjit_func
= "___bpf_prog_run";
1424 struct get_stack_trace_t
*e
= data
;
1431 if (size
< sizeof(struct get_stack_trace_t
)) {
1432 __u64
*raw_data
= data
;
1435 num_stack
= size
/ sizeof(__u64
);
1436 /* If jit is enabled, we do not have a good way to
1437 * verify the sanity of the kernel stack. So we
1438 * just assume it is good if the stack is not empty.
1439 * This could be improved in the future.
1442 found
= num_stack
> 0;
1444 for (i
= 0; i
< num_stack
; i
++) {
1445 ks
= ksym_search(raw_data
[i
]);
1446 if (strcmp(ks
->name
, nonjit_func
) == 0) {
1453 good_kern_stack
= true;
1454 good_user_stack
= true;
1457 num_stack
= e
->kern_stack_size
/ sizeof(__u64
);
1459 good_kern_stack
= num_stack
> 0;
1461 for (i
= 0; i
< num_stack
; i
++) {
1462 ks
= ksym_search(e
->kern_stack
[i
]);
1463 if (strcmp(ks
->name
, nonjit_func
) == 0) {
1464 good_kern_stack
= true;
1469 if (e
->user_stack_size
> 0 && e
->user_stack_buildid_size
> 0)
1470 good_user_stack
= true;
1472 if (!good_kern_stack
|| !good_user_stack
)
1473 return LIBBPF_PERF_EVENT_ERROR
;
1475 if (cnt
== MAX_CNT_RAWTP
)
1476 return LIBBPF_PERF_EVENT_DONE
;
1478 return LIBBPF_PERF_EVENT_CONT
;
static void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
	struct perf_event_attr attr = {};
	struct timespec tv = {0, 10};
	__u32 key = 0, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))

	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  perfmap_fd, errno))

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))

	attr.sample_type = PERF_SAMPLE_RAW;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
			 -1/*group_fd*/, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))

	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
		  errno))

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
		  err, errno))

	err = perf_event_mmap(pmu_fd);
	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

	err = perf_event_poller(pmu_fd, get_stack_print_output);
	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))

	goto close_prog_noerr;

close_prog_noerr:
	bpf_object__close(obj);
}
static void test_task_fd_query_rawtp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	int efd, err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))

	/* query (getpid(), efd) */
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      strcmp(buf, "sys_enter") == 0;
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))

	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
		  err, errno))

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))

	/* test empty buffer */
	err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
		  err, errno))

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))

	/* test smaller buffer */
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
		  "err %d errno %d\n", err, errno))

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter") &&
	      strcmp(buf, "sy") == 0;
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))

	goto close_prog_noerr;

close_prog_noerr:
	bpf_object__close(obj);
}
static void test_task_fd_query_tp_core(const char *probe_name,
				       const char *tp_name)
{
	const char *file = "./test_tracepoint.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	struct perf_event_attr attr = {};
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
	bytes = read(efd, buf, sizeof(buf));
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))

	/* query (getpid(), pmu_fd) */
	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))

	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))

	goto close_prog_noerr;

close_prog_noerr:
	bpf_object__close(obj);
}
static void test_task_fd_query_tp(void)
{
	test_task_fd_query_tp_core("sched/sched_switch",
				   "sched_switch");
	test_task_fd_query_tp_core("syscalls/sys_enter_read",
				   "sys_enter_read");
}

int main(void)
{
	jit_enabled = is_jit_enabled();

	test_xdp_adjust_tail();
	test_xdp_noinline();
	test_pkt_md_access();
	test_tp_attach_query();
	test_stacktrace_map();
	test_stacktrace_build_id();
	test_stacktrace_build_id_nmi();
	test_stacktrace_map_raw_tp();
	test_get_stack_raw_tp();
	test_task_fd_query_rawtp();
	test_task_fd_query_tp();

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}