1 /* Copyright (c) 2017 Facebook
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
15 #include <linux/types.h>
16 typedef __u16 __sum16
;
17 #include <arpa/inet.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_packet.h>
21 #include <linux/ipv6.h>
22 #include <linux/tcp.h>
23 #include <linux/filter.h>
24 #include <linux/perf_event.h>
25 #include <linux/unistd.h>
27 #include <sys/ioctl.h>
29 #include <sys/resource.h>
30 #include <sys/types.h>
33 #include <linux/bpf.h>
34 #include <linux/err.h>
36 #include <bpf/libbpf.h>
37 #include "test_iptunnel_common.h"
39 #include "bpf_endian.h"
/* Global test counters: printed in the final summary and used to pick the
 * process exit status (see the Summary printf / return at end of main).
 * NOTE(review): presumably incremented by the CHECK() macro's fail/pass
 * branches — those lines are not visible here, confirm against the macro.
 */
static int error_cnt
, pass_cnt
;
43 #define MAGIC_BYTES 123
45 /* ipv4 test vector */
51 .eth
.h_proto
= bpf_htons(ETH_P_IP
),
54 .iph
.tot_len
= bpf_htons(MAGIC_BYTES
),
58 /* ipv6 test vector */
64 .eth
.h_proto
= bpf_htons(ETH_P_IPV6
),
66 .iph
.payload_len
= bpf_htons(MAGIC_BYTES
),
/* Evaluate @condition; on failure bump error_cnt and print a FAIL line with
 * the caller-supplied format, otherwise bump pass_cnt and print PASS.
 * Evaluates to the (normalized) condition so callers can branch on it.
 * Relies on a __u32 'duration' variable being in scope at the call site.
 */
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})
83 static int bpf_find_map(const char *test
, struct bpf_object
*obj
,
88 map
= bpf_object__find_map_by_name(obj
, name
);
90 printf("%s:FAIL:map '%s' not found\n", test
, name
);
94 return bpf_map__fd(map
);
97 static void test_pkt_access(void)
99 const char *file
= "./test_pkt_access.o";
100 struct bpf_object
*obj
;
101 __u32 duration
, retval
;
104 err
= bpf_prog_load(file
, BPF_PROG_TYPE_SCHED_CLS
, &obj
, &prog_fd
);
110 err
= bpf_prog_test_run(prog_fd
, 100000, &pkt_v4
, sizeof(pkt_v4
),
111 NULL
, NULL
, &retval
, &duration
);
112 CHECK(err
|| errno
|| retval
, "ipv4",
113 "err %d errno %d retval %d duration %d\n",
114 err
, errno
, retval
, duration
);
116 err
= bpf_prog_test_run(prog_fd
, 100000, &pkt_v6
, sizeof(pkt_v6
),
117 NULL
, NULL
, &retval
, &duration
);
118 CHECK(err
|| errno
|| retval
, "ipv6",
119 "err %d errno %d retval %d duration %d\n",
120 err
, errno
, retval
, duration
);
121 bpf_object__close(obj
);
124 static void test_xdp(void)
126 struct vip key4
= {.protocol
= 6, .family
= AF_INET
};
127 struct vip key6
= {.protocol
= 6, .family
= AF_INET6
};
128 struct iptnl_info value4
= {.family
= AF_INET
};
129 struct iptnl_info value6
= {.family
= AF_INET6
};
130 const char *file
= "./test_xdp.o";
131 struct bpf_object
*obj
;
133 struct ipv6hdr
*iph6
= (void *)buf
+ sizeof(struct ethhdr
);
134 struct iphdr
*iph
= (void *)buf
+ sizeof(struct ethhdr
);
135 __u32 duration
, retval
, size
;
136 int err
, prog_fd
, map_fd
;
138 err
= bpf_prog_load(file
, BPF_PROG_TYPE_XDP
, &obj
, &prog_fd
);
144 map_fd
= bpf_find_map(__func__
, obj
, "vip2tnl");
147 bpf_map_update_elem(map_fd
, &key4
, &value4
, 0);
148 bpf_map_update_elem(map_fd
, &key6
, &value6
, 0);
150 err
= bpf_prog_test_run(prog_fd
, 1, &pkt_v4
, sizeof(pkt_v4
),
151 buf
, &size
, &retval
, &duration
);
153 CHECK(err
|| errno
|| retval
!= XDP_TX
|| size
!= 74 ||
154 iph
->protocol
!= IPPROTO_IPIP
, "ipv4",
155 "err %d errno %d retval %d size %d\n",
156 err
, errno
, retval
, size
);
158 err
= bpf_prog_test_run(prog_fd
, 1, &pkt_v6
, sizeof(pkt_v6
),
159 buf
, &size
, &retval
, &duration
);
160 CHECK(err
|| errno
|| retval
!= XDP_TX
|| size
!= 114 ||
161 iph6
->nexthdr
!= IPPROTO_IPV6
, "ipv6",
162 "err %d errno %d retval %d size %d\n",
163 err
, errno
, retval
, size
);
165 bpf_object__close(obj
);
168 #define MAGIC_VAL 0x1234
169 #define NUM_ITER 100000
172 static void test_l4lb(const char *file
)
174 unsigned int nr_cpus
= bpf_num_possible_cpus();
175 struct vip key
= {.protocol
= 6};
179 } value
= {.vip_num
= VIP_NUM
};
180 __u32 stats_key
= VIP_NUM
;
185 struct real_definition
{
191 } real_def
= {.dst
= MAGIC_VAL
};
192 __u32 ch_key
= 11, real_num
= 3;
193 __u32 duration
, retval
, size
;
194 int err
, i
, prog_fd
, map_fd
;
195 __u64 bytes
= 0, pkts
= 0;
196 struct bpf_object
*obj
;
198 u32
*magic
= (u32
*)buf
;
200 err
= bpf_prog_load(file
, BPF_PROG_TYPE_SCHED_CLS
, &obj
, &prog_fd
);
206 map_fd
= bpf_find_map(__func__
, obj
, "vip_map");
209 bpf_map_update_elem(map_fd
, &key
, &value
, 0);
211 map_fd
= bpf_find_map(__func__
, obj
, "ch_rings");
214 bpf_map_update_elem(map_fd
, &ch_key
, &real_num
, 0);
216 map_fd
= bpf_find_map(__func__
, obj
, "reals");
219 bpf_map_update_elem(map_fd
, &real_num
, &real_def
, 0);
221 err
= bpf_prog_test_run(prog_fd
, NUM_ITER
, &pkt_v4
, sizeof(pkt_v4
),
222 buf
, &size
, &retval
, &duration
);
223 CHECK(err
|| errno
|| retval
!= 7/*TC_ACT_REDIRECT*/ || size
!= 54 ||
224 *magic
!= MAGIC_VAL
, "ipv4",
225 "err %d errno %d retval %d size %d magic %x\n",
226 err
, errno
, retval
, size
, *magic
);
228 err
= bpf_prog_test_run(prog_fd
, NUM_ITER
, &pkt_v6
, sizeof(pkt_v6
),
229 buf
, &size
, &retval
, &duration
);
230 CHECK(err
|| errno
|| retval
!= 7/*TC_ACT_REDIRECT*/ || size
!= 74 ||
231 *magic
!= MAGIC_VAL
, "ipv6",
232 "err %d errno %d retval %d size %d magic %x\n",
233 err
, errno
, retval
, size
, *magic
);
235 map_fd
= bpf_find_map(__func__
, obj
, "stats");
238 bpf_map_lookup_elem(map_fd
, &stats_key
, stats
);
239 for (i
= 0; i
< nr_cpus
; i
++) {
240 bytes
+= stats
[i
].bytes
;
241 pkts
+= stats
[i
].pkts
;
243 if (bytes
!= MAGIC_BYTES
* NUM_ITER
* 2 || pkts
!= NUM_ITER
* 2) {
245 printf("test_l4lb:FAIL:stats %lld %lld\n", bytes
, pkts
);
248 bpf_object__close(obj
);
/* Run the l4lb test against both the always_inline and the noinline
 * builds of the load balancer.
 */
static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}
260 static void test_xdp_noinline(void)
262 const char *file
= "./test_xdp_noinline.o";
263 unsigned int nr_cpus
= bpf_num_possible_cpus();
264 struct vip key
= {.protocol
= 6};
268 } value
= {.vip_num
= VIP_NUM
};
269 __u32 stats_key
= VIP_NUM
;
274 struct real_definition
{
280 } real_def
= {.dst
= MAGIC_VAL
};
281 __u32 ch_key
= 11, real_num
= 3;
282 __u32 duration
, retval
, size
;
283 int err
, i
, prog_fd
, map_fd
;
284 __u64 bytes
= 0, pkts
= 0;
285 struct bpf_object
*obj
;
287 u32
*magic
= (u32
*)buf
;
289 err
= bpf_prog_load(file
, BPF_PROG_TYPE_XDP
, &obj
, &prog_fd
);
295 map_fd
= bpf_find_map(__func__
, obj
, "vip_map");
298 bpf_map_update_elem(map_fd
, &key
, &value
, 0);
300 map_fd
= bpf_find_map(__func__
, obj
, "ch_rings");
303 bpf_map_update_elem(map_fd
, &ch_key
, &real_num
, 0);
305 map_fd
= bpf_find_map(__func__
, obj
, "reals");
308 bpf_map_update_elem(map_fd
, &real_num
, &real_def
, 0);
310 err
= bpf_prog_test_run(prog_fd
, NUM_ITER
, &pkt_v4
, sizeof(pkt_v4
),
311 buf
, &size
, &retval
, &duration
);
312 CHECK(err
|| errno
|| retval
!= 1 || size
!= 54 ||
313 *magic
!= MAGIC_VAL
, "ipv4",
314 "err %d errno %d retval %d size %d magic %x\n",
315 err
, errno
, retval
, size
, *magic
);
317 err
= bpf_prog_test_run(prog_fd
, NUM_ITER
, &pkt_v6
, sizeof(pkt_v6
),
318 buf
, &size
, &retval
, &duration
);
319 CHECK(err
|| errno
|| retval
!= 1 || size
!= 74 ||
320 *magic
!= MAGIC_VAL
, "ipv6",
321 "err %d errno %d retval %d size %d magic %x\n",
322 err
, errno
, retval
, size
, *magic
);
324 map_fd
= bpf_find_map(__func__
, obj
, "stats");
327 bpf_map_lookup_elem(map_fd
, &stats_key
, stats
);
328 for (i
= 0; i
< nr_cpus
; i
++) {
329 bytes
+= stats
[i
].bytes
;
330 pkts
+= stats
[i
].pkts
;
332 if (bytes
!= MAGIC_BYTES
* NUM_ITER
* 2 || pkts
!= NUM_ITER
* 2) {
334 printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes
, pkts
);
337 bpf_object__close(obj
);
340 static void test_tcp_estats(void)
342 const char *file
= "./test_tcp_estats.o";
344 struct bpf_object
*obj
;
347 err
= bpf_prog_load(file
, BPF_PROG_TYPE_TRACEPOINT
, &obj
, &prog_fd
);
348 CHECK(err
, "", "err %d errno %d\n", err
, errno
);
354 bpf_object__close(obj
);
/* Convert a pointer to the __u64 representation used by bpf_attr fields.
 * The intermediate unsigned long cast keeps the conversion well-defined on
 * both 32- and 64-bit userspace.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
362 static void test_bpf_obj_id(void)
364 const __u64 array_magic_value
= 0xfaceb00c;
365 const __u32 array_key
= 0;
366 const int nr_iters
= 2;
367 const char *file
= "./test_obj_id.o";
368 const char *jit_sysctl
= "/proc/sys/net/core/bpf_jit_enable";
369 const char *expected_prog_name
= "test_obj_id";
370 const char *expected_map_name
= "test_map_id";
371 const __u64 nsec_per_sec
= 1000000000;
373 struct bpf_object
*objs
[nr_iters
];
374 int prog_fds
[nr_iters
], map_fds
[nr_iters
];
375 /* +1 to test for the info_len returned by kernel */
376 struct bpf_prog_info prog_infos
[nr_iters
+ 1];
377 struct bpf_map_info map_infos
[nr_iters
+ 1];
378 /* Each prog only uses one map. +1 to test nr_map_ids
379 * returned by kernel.
381 __u32 map_ids
[nr_iters
+ 1];
382 char jited_insns
[128], xlated_insns
[128], zeros
[128];
383 __u32 i
, next_id
, info_len
, nr_id_found
, duration
= 0;
384 struct timespec real_time_ts
, boot_time_ts
;
385 int sysctl_fd
, jit_enabled
= 0, err
= 0;
387 uid_t my_uid
= getuid();
388 time_t now
, load_time
;
390 sysctl_fd
= open(jit_sysctl
, 0, O_RDONLY
);
391 if (sysctl_fd
!= -1) {
394 if (read(sysctl_fd
, &tmpc
, sizeof(tmpc
)) == 1)
395 jit_enabled
= (tmpc
!= '0');
399 err
= bpf_prog_get_fd_by_id(0);
400 CHECK(err
>= 0 || errno
!= ENOENT
,
401 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err
, errno
);
403 err
= bpf_map_get_fd_by_id(0);
404 CHECK(err
>= 0 || errno
!= ENOENT
,
405 "get-fd-by-notexist-map-id", "err %d errno %d\n", err
, errno
);
407 for (i
= 0; i
< nr_iters
; i
++)
410 /* Check bpf_obj_get_info_by_fd() */
411 bzero(zeros
, sizeof(zeros
));
412 for (i
= 0; i
< nr_iters
; i
++) {
414 err
= bpf_prog_load(file
, BPF_PROG_TYPE_SOCKET_FILTER
,
415 &objs
[i
], &prog_fds
[i
]);
416 /* test_obj_id.o is a dumb prog. It should never fail
423 /* Insert a magic value to the map */
424 map_fds
[i
] = bpf_find_map(__func__
, objs
[i
], "test_map_id");
425 assert(map_fds
[i
] >= 0);
426 err
= bpf_map_update_elem(map_fds
[i
], &array_key
,
427 &array_magic_value
, 0);
430 /* Check getting map info */
431 info_len
= sizeof(struct bpf_map_info
) * 2;
432 bzero(&map_infos
[i
], info_len
);
433 err
= bpf_obj_get_info_by_fd(map_fds
[i
], &map_infos
[i
],
436 map_infos
[i
].type
!= BPF_MAP_TYPE_ARRAY
||
437 map_infos
[i
].key_size
!= sizeof(__u32
) ||
438 map_infos
[i
].value_size
!= sizeof(__u64
) ||
439 map_infos
[i
].max_entries
!= 1 ||
440 map_infos
[i
].map_flags
!= 0 ||
441 info_len
!= sizeof(struct bpf_map_info
) ||
442 strcmp((char *)map_infos
[i
].name
, expected_map_name
),
444 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
446 map_infos
[i
].type
, BPF_MAP_TYPE_ARRAY
,
447 info_len
, sizeof(struct bpf_map_info
),
448 map_infos
[i
].key_size
,
449 map_infos
[i
].value_size
,
450 map_infos
[i
].max_entries
,
451 map_infos
[i
].map_flags
,
452 map_infos
[i
].name
, expected_map_name
))
455 /* Check getting prog info */
456 info_len
= sizeof(struct bpf_prog_info
) * 2;
457 bzero(&prog_infos
[i
], info_len
);
458 bzero(jited_insns
, sizeof(jited_insns
));
459 bzero(xlated_insns
, sizeof(xlated_insns
));
460 prog_infos
[i
].jited_prog_insns
= ptr_to_u64(jited_insns
);
461 prog_infos
[i
].jited_prog_len
= sizeof(jited_insns
);
462 prog_infos
[i
].xlated_prog_insns
= ptr_to_u64(xlated_insns
);
463 prog_infos
[i
].xlated_prog_len
= sizeof(xlated_insns
);
464 prog_infos
[i
].map_ids
= ptr_to_u64(map_ids
+ i
);
465 prog_infos
[i
].nr_map_ids
= 2;
466 err
= clock_gettime(CLOCK_REALTIME
, &real_time_ts
);
468 err
= clock_gettime(CLOCK_BOOTTIME
, &boot_time_ts
);
470 err
= bpf_obj_get_info_by_fd(prog_fds
[i
], &prog_infos
[i
],
472 load_time
= (real_time_ts
.tv_sec
- boot_time_ts
.tv_sec
)
473 + (prog_infos
[i
].load_time
/ nsec_per_sec
);
475 prog_infos
[i
].type
!= BPF_PROG_TYPE_SOCKET_FILTER
||
476 info_len
!= sizeof(struct bpf_prog_info
) ||
477 (jit_enabled
&& !prog_infos
[i
].jited_prog_len
) ||
479 !memcmp(jited_insns
, zeros
, sizeof(zeros
))) ||
480 !prog_infos
[i
].xlated_prog_len
||
481 !memcmp(xlated_insns
, zeros
, sizeof(zeros
)) ||
482 load_time
< now
- 60 || load_time
> now
+ 60 ||
483 prog_infos
[i
].created_by_uid
!= my_uid
||
484 prog_infos
[i
].nr_map_ids
!= 1 ||
485 *(int *)prog_infos
[i
].map_ids
!= map_infos
[i
].id
||
486 strcmp((char *)prog_infos
[i
].name
, expected_prog_name
),
488 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
490 prog_infos
[i
].type
, BPF_PROG_TYPE_SOCKET_FILTER
,
491 info_len
, sizeof(struct bpf_prog_info
),
493 prog_infos
[i
].jited_prog_len
,
494 prog_infos
[i
].xlated_prog_len
,
495 !!memcmp(jited_insns
, zeros
, sizeof(zeros
)),
496 !!memcmp(xlated_insns
, zeros
, sizeof(zeros
)),
498 prog_infos
[i
].created_by_uid
, my_uid
,
499 prog_infos
[i
].nr_map_ids
, 1,
500 *(int *)prog_infos
[i
].map_ids
, map_infos
[i
].id
,
501 prog_infos
[i
].name
, expected_prog_name
))
505 /* Check bpf_prog_get_next_id() */
508 while (!bpf_prog_get_next_id(next_id
, &next_id
)) {
509 struct bpf_prog_info prog_info
= {};
513 info_len
= sizeof(prog_info
);
515 prog_fd
= bpf_prog_get_fd_by_id(next_id
);
516 if (prog_fd
< 0 && errno
== ENOENT
)
517 /* The bpf_prog is in the dead row */
519 if (CHECK(prog_fd
< 0, "get-prog-fd(next_id)",
520 "prog_fd %d next_id %d errno %d\n",
521 prog_fd
, next_id
, errno
))
524 for (i
= 0; i
< nr_iters
; i
++)
525 if (prog_infos
[i
].id
== next_id
)
534 * prog_info.nr_map_ids = 1
535 * prog_info.map_ids = NULL
537 prog_info
.nr_map_ids
= 1;
538 err
= bpf_obj_get_info_by_fd(prog_fd
, &prog_info
, &info_len
);
539 if (CHECK(!err
|| errno
!= EFAULT
,
540 "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
543 bzero(&prog_info
, sizeof(prog_info
));
544 info_len
= sizeof(prog_info
);
546 saved_map_id
= *(int *)(prog_infos
[i
].map_ids
);
547 prog_info
.map_ids
= prog_infos
[i
].map_ids
;
548 prog_info
.nr_map_ids
= 2;
549 err
= bpf_obj_get_info_by_fd(prog_fd
, &prog_info
, &info_len
);
550 prog_infos
[i
].jited_prog_insns
= 0;
551 prog_infos
[i
].xlated_prog_insns
= 0;
552 CHECK(err
|| info_len
!= sizeof(struct bpf_prog_info
) ||
553 memcmp(&prog_info
, &prog_infos
[i
], info_len
) ||
554 *(int *)prog_info
.map_ids
!= saved_map_id
,
555 "get-prog-info(next_id->fd)",
556 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
557 err
, errno
, info_len
, sizeof(struct bpf_prog_info
),
558 memcmp(&prog_info
, &prog_infos
[i
], info_len
),
559 *(int *)prog_info
.map_ids
, saved_map_id
);
562 CHECK(nr_id_found
!= nr_iters
,
563 "check total prog id found by get_next_id",
564 "nr_id_found %u(%u)\n",
565 nr_id_found
, nr_iters
);
567 /* Check bpf_map_get_next_id() */
570 while (!bpf_map_get_next_id(next_id
, &next_id
)) {
571 struct bpf_map_info map_info
= {};
574 info_len
= sizeof(map_info
);
576 map_fd
= bpf_map_get_fd_by_id(next_id
);
577 if (map_fd
< 0 && errno
== ENOENT
)
578 /* The bpf_map is in the dead row */
580 if (CHECK(map_fd
< 0, "get-map-fd(next_id)",
581 "map_fd %d next_id %u errno %d\n",
582 map_fd
, next_id
, errno
))
585 for (i
= 0; i
< nr_iters
; i
++)
586 if (map_infos
[i
].id
== next_id
)
594 err
= bpf_map_lookup_elem(map_fd
, &array_key
, &array_value
);
597 err
= bpf_obj_get_info_by_fd(map_fd
, &map_info
, &info_len
);
598 CHECK(err
|| info_len
!= sizeof(struct bpf_map_info
) ||
599 memcmp(&map_info
, &map_infos
[i
], info_len
) ||
600 array_value
!= array_magic_value
,
601 "check get-map-info(next_id->fd)",
602 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
603 err
, errno
, info_len
, sizeof(struct bpf_map_info
),
604 memcmp(&map_info
, &map_infos
[i
], info_len
),
605 array_value
, array_magic_value
);
609 CHECK(nr_id_found
!= nr_iters
,
610 "check total map id found by get_next_id",
611 "nr_id_found %u(%u)\n",
612 nr_id_found
, nr_iters
);
615 for (i
= 0; i
< nr_iters
; i
++)
616 bpf_object__close(objs
[i
]);
619 static void test_pkt_md_access(void)
621 const char *file
= "./test_pkt_md_access.o";
622 struct bpf_object
*obj
;
623 __u32 duration
, retval
;
626 err
= bpf_prog_load(file
, BPF_PROG_TYPE_SCHED_CLS
, &obj
, &prog_fd
);
632 err
= bpf_prog_test_run(prog_fd
, 10, &pkt_v4
, sizeof(pkt_v4
),
633 NULL
, NULL
, &retval
, &duration
);
634 CHECK(err
|| retval
, "",
635 "err %d errno %d retval %d duration %d\n",
636 err
, errno
, retval
, duration
);
638 bpf_object__close(obj
);
641 static void test_obj_name(void)
649 { "_123456789ABCDE", 1, 0 },
650 { "_123456789ABCDEF", 0, EINVAL
},
651 { "_123456789ABCD\n", 0, EINVAL
},
653 struct bpf_insn prog
[] = {
654 BPF_ALU64_IMM(BPF_MOV
, BPF_REG_0
, 0),
660 for (i
= 0; i
< sizeof(tests
) / sizeof(tests
[0]); i
++) {
661 size_t name_len
= strlen(tests
[i
].name
) + 1;
666 /* test different attr.prog_name during BPF_PROG_LOAD */
667 ncopy
= name_len
< sizeof(attr
.prog_name
) ?
668 name_len
: sizeof(attr
.prog_name
);
669 bzero(&attr
, sizeof(attr
));
670 attr
.prog_type
= BPF_PROG_TYPE_SCHED_CLS
;
672 attr
.insns
= ptr_to_u64(prog
);
673 attr
.license
= ptr_to_u64("");
674 memcpy(attr
.prog_name
, tests
[i
].name
, ncopy
);
676 fd
= syscall(__NR_bpf
, BPF_PROG_LOAD
, &attr
, sizeof(attr
));
677 CHECK((tests
[i
].success
&& fd
< 0) ||
678 (!tests
[i
].success
&& fd
!= -1) ||
679 (!tests
[i
].success
&& errno
!= tests
[i
].expected_errno
),
680 "check-bpf-prog-name",
681 "fd %d(%d) errno %d(%d)\n",
682 fd
, tests
[i
].success
, errno
, tests
[i
].expected_errno
);
687 /* test different attr.map_name during BPF_MAP_CREATE */
688 ncopy
= name_len
< sizeof(attr
.map_name
) ?
689 name_len
: sizeof(attr
.map_name
);
690 bzero(&attr
, sizeof(attr
));
691 attr
.map_type
= BPF_MAP_TYPE_ARRAY
;
694 attr
.max_entries
= 1;
696 memcpy(attr
.map_name
, tests
[i
].name
, ncopy
);
697 fd
= syscall(__NR_bpf
, BPF_MAP_CREATE
, &attr
, sizeof(attr
));
698 CHECK((tests
[i
].success
&& fd
< 0) ||
699 (!tests
[i
].success
&& fd
!= -1) ||
700 (!tests
[i
].success
&& errno
!= tests
[i
].expected_errno
),
701 "check-bpf-map-name",
702 "fd %d(%d) errno %d(%d)\n",
703 fd
, tests
[i
].success
, errno
, tests
[i
].expected_errno
);
710 static void test_tp_attach_query(void)
712 const int num_progs
= 3;
713 int i
, j
, bytes
, efd
, err
, prog_fd
[num_progs
], pmu_fd
[num_progs
];
714 __u32 duration
= 0, info_len
, saved_prog_ids
[num_progs
];
715 const char *file
= "./test_tracepoint.o";
716 struct perf_event_query_bpf
*query
;
717 struct perf_event_attr attr
= {};
718 struct bpf_object
*obj
[num_progs
];
719 struct bpf_prog_info prog_info
;
722 snprintf(buf
, sizeof(buf
),
723 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
724 efd
= open(buf
, O_RDONLY
, 0);
725 if (CHECK(efd
< 0, "open", "err %d errno %d\n", efd
, errno
))
727 bytes
= read(efd
, buf
, sizeof(buf
));
729 if (CHECK(bytes
<= 0 || bytes
>= sizeof(buf
),
730 "read", "bytes %d errno %d\n", bytes
, errno
))
733 attr
.config
= strtol(buf
, NULL
, 0);
734 attr
.type
= PERF_TYPE_TRACEPOINT
;
735 attr
.sample_type
= PERF_SAMPLE_RAW
| PERF_SAMPLE_CALLCHAIN
;
736 attr
.sample_period
= 1;
737 attr
.wakeup_events
= 1;
739 query
= malloc(sizeof(*query
) + sizeof(__u32
) * num_progs
);
740 for (i
= 0; i
< num_progs
; i
++) {
741 err
= bpf_prog_load(file
, BPF_PROG_TYPE_TRACEPOINT
, &obj
[i
],
743 if (CHECK(err
, "prog_load", "err %d errno %d\n", err
, errno
))
746 bzero(&prog_info
, sizeof(prog_info
));
747 prog_info
.jited_prog_len
= 0;
748 prog_info
.xlated_prog_len
= 0;
749 prog_info
.nr_map_ids
= 0;
750 info_len
= sizeof(prog_info
);
751 err
= bpf_obj_get_info_by_fd(prog_fd
[i
], &prog_info
, &info_len
);
752 if (CHECK(err
, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
755 saved_prog_ids
[i
] = prog_info
.id
;
757 pmu_fd
[i
] = syscall(__NR_perf_event_open
, &attr
, -1 /* pid */,
758 0 /* cpu 0 */, -1 /* group id */,
760 if (CHECK(pmu_fd
[i
] < 0, "perf_event_open", "err %d errno %d\n",
763 err
= ioctl(pmu_fd
[i
], PERF_EVENT_IOC_ENABLE
, 0);
764 if (CHECK(err
, "perf_event_ioc_enable", "err %d errno %d\n",
769 /* check NULL prog array query */
770 query
->ids_len
= num_progs
;
771 err
= ioctl(pmu_fd
[i
], PERF_EVENT_IOC_QUERY_BPF
, query
);
772 if (CHECK(err
|| query
->prog_cnt
!= 0,
773 "perf_event_ioc_query_bpf",
774 "err %d errno %d query->prog_cnt %u\n",
775 err
, errno
, query
->prog_cnt
))
779 err
= ioctl(pmu_fd
[i
], PERF_EVENT_IOC_SET_BPF
, prog_fd
[i
]);
780 if (CHECK(err
, "perf_event_ioc_set_bpf", "err %d errno %d\n",
785 /* try to get # of programs only */
787 err
= ioctl(pmu_fd
[i
], PERF_EVENT_IOC_QUERY_BPF
, query
);
788 if (CHECK(err
|| query
->prog_cnt
!= 2,
789 "perf_event_ioc_query_bpf",
790 "err %d errno %d query->prog_cnt %u\n",
791 err
, errno
, query
->prog_cnt
))
794 /* try a few negative tests */
795 /* invalid query pointer */
796 err
= ioctl(pmu_fd
[i
], PERF_EVENT_IOC_QUERY_BPF
,
797 (struct perf_event_query_bpf
*)0x1);
798 if (CHECK(!err
|| errno
!= EFAULT
,
799 "perf_event_ioc_query_bpf",
800 "err %d errno %d\n", err
, errno
))
803 /* no enough space */
805 err
= ioctl(pmu_fd
[i
], PERF_EVENT_IOC_QUERY_BPF
, query
);
806 if (CHECK(!err
|| errno
!= ENOSPC
|| query
->prog_cnt
!= 2,
807 "perf_event_ioc_query_bpf",
808 "err %d errno %d query->prog_cnt %u\n",
809 err
, errno
, query
->prog_cnt
))
813 query
->ids_len
= num_progs
;
814 err
= ioctl(pmu_fd
[i
], PERF_EVENT_IOC_QUERY_BPF
, query
);
815 if (CHECK(err
|| query
->prog_cnt
!= (i
+ 1),
816 "perf_event_ioc_query_bpf",
817 "err %d errno %d query->prog_cnt %u\n",
818 err
, errno
, query
->prog_cnt
))
820 for (j
= 0; j
< i
+ 1; j
++)
821 if (CHECK(saved_prog_ids
[j
] != query
->ids
[j
],
822 "perf_event_ioc_query_bpf",
823 "#%d saved_prog_id %x query prog_id %x\n",
824 j
, saved_prog_ids
[j
], query
->ids
[j
]))
829 for (; i
>= 0; i
--) {
831 ioctl(pmu_fd
[i
], PERF_EVENT_IOC_DISABLE
);
835 bpf_object__close(obj
[i
]);
840 static int compare_map_keys(int map1_fd
, int map2_fd
)
843 char val_buf
[PERF_MAX_STACK_DEPTH
* sizeof(__u64
)];
846 err
= bpf_map_get_next_key(map1_fd
, NULL
, &key
);
849 err
= bpf_map_lookup_elem(map2_fd
, &key
, val_buf
);
853 while (bpf_map_get_next_key(map1_fd
, &key
, &next_key
) == 0) {
854 err
= bpf_map_lookup_elem(map2_fd
, &next_key
, val_buf
);
866 static void test_stacktrace_map()
868 int control_map_fd
, stackid_hmap_fd
, stackmap_fd
;
869 const char *file
= "./test_stacktrace_map.o";
870 int bytes
, efd
, err
, pmu_fd
, prog_fd
;
871 struct perf_event_attr attr
= {};
872 __u32 key
, val
, duration
= 0;
873 struct bpf_object
*obj
;
876 err
= bpf_prog_load(file
, BPF_PROG_TYPE_TRACEPOINT
, &obj
, &prog_fd
);
877 if (CHECK(err
, "prog_load", "err %d errno %d\n", err
, errno
))
880 /* Get the ID for the sched/sched_switch tracepoint */
881 snprintf(buf
, sizeof(buf
),
882 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
883 efd
= open(buf
, O_RDONLY
, 0);
884 if (CHECK(efd
< 0, "open", "err %d errno %d\n", efd
, errno
))
887 bytes
= read(efd
, buf
, sizeof(buf
));
889 if (CHECK(bytes
<= 0 || bytes
>= sizeof(buf
),
890 "read", "bytes %d errno %d\n", bytes
, errno
))
893 /* Open the perf event and attach bpf progrram */
894 attr
.config
= strtol(buf
, NULL
, 0);
895 attr
.type
= PERF_TYPE_TRACEPOINT
;
896 attr
.sample_type
= PERF_SAMPLE_RAW
| PERF_SAMPLE_CALLCHAIN
;
897 attr
.sample_period
= 1;
898 attr
.wakeup_events
= 1;
899 pmu_fd
= syscall(__NR_perf_event_open
, &attr
, -1 /* pid */,
900 0 /* cpu 0 */, -1 /* group id */,
902 if (CHECK(pmu_fd
< 0, "perf_event_open", "err %d errno %d\n",
906 err
= ioctl(pmu_fd
, PERF_EVENT_IOC_ENABLE
, 0);
907 if (CHECK(err
, "perf_event_ioc_enable", "err %d errno %d\n",
911 err
= ioctl(pmu_fd
, PERF_EVENT_IOC_SET_BPF
, prog_fd
);
912 if (CHECK(err
, "perf_event_ioc_set_bpf", "err %d errno %d\n",
917 control_map_fd
= bpf_find_map(__func__
, obj
, "control_map");
918 if (CHECK(control_map_fd
< 0, "bpf_find_map control_map",
919 "err %d errno %d\n", err
, errno
))
922 stackid_hmap_fd
= bpf_find_map(__func__
, obj
, "stackid_hmap");
923 if (CHECK(stackid_hmap_fd
< 0, "bpf_find_map stackid_hmap",
924 "err %d errno %d\n", err
, errno
))
927 stackmap_fd
= bpf_find_map(__func__
, obj
, "stackmap");
928 if (CHECK(stackmap_fd
< 0, "bpf_find_map stackmap", "err %d errno %d\n",
932 /* give some time for bpf program run */
935 /* disable stack trace collection */
938 bpf_map_update_elem(control_map_fd
, &key
, &val
, 0);
940 /* for every element in stackid_hmap, we can find a corresponding one
941 * in stackmap, and vise versa.
943 err
= compare_map_keys(stackid_hmap_fd
, stackmap_fd
);
944 if (CHECK(err
, "compare_map_keys stackid_hmap vs. stackmap",
945 "err %d errno %d\n", err
, errno
))
948 err
= compare_map_keys(stackmap_fd
, stackid_hmap_fd
);
949 if (CHECK(err
, "compare_map_keys stackmap vs. stackid_hmap",
950 "err %d errno %d\n", err
, errno
))
954 ioctl(pmu_fd
, PERF_EVENT_IOC_DISABLE
);
960 bpf_object__close(obj
);
968 struct rlimit rinf
= { RLIM_INFINITY
, RLIM_INFINITY
};
970 setrlimit(RLIMIT_MEMLOCK
, &rinf
);
978 test_pkt_md_access();
980 test_tp_attach_query();
981 test_stacktrace_map();
983 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt
, error_cnt
);
984 return error_cnt
? EXIT_FAILURE
: EXIT_SUCCESS
;