// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
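
/* Each *.skel.h header above is generated from the like-named BPF object as
 * part of the selftests build (bpftool skeleton generation); the skeletons
 * expose typed handles such as skel->progs, skel->maps, skel->bss and
 * skel->rodata that the tests below rely on.
 */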

static int duration;

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

static void do_dummy_read(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* don't check contents, just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}
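
/* do_dummy_read() is the pattern the "dump" subtests below share: attach the
 * iterator program with no extra link info, instantiate one iterator with
 * bpf_iter_create(), then read() the resulting fd until EOF. The output is
 * not validated here; the tests only check that the kernel can walk the whole
 * object set and finish the seq_file read cleanly.
 */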

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void test_task(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (CHECK(!skel, "bpf_iter_task__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_task);

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);

	bpf_iter_task_stack__destroy(skel);
}

static void *do_nothing(void *arg)
{
	return NULL;
}
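
/* The dummy pthread shares this process's files table. The companion BPF
 * program counts file visits for threads whose pid differs from the
 * configured tgid, and test_task_file() asserts that count stays 0, i.e.
 * the task_file iterator does not revisit a shared files table once per
 * thread. (Description inferred from the checks below.)
 */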

static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	skel->bss->tgid = getpid();

	if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
		  "pthread_create", "pthread_create failed\n"))
		goto done;

	do_dummy_read(skel->progs.dump_task_file);

	if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
		  "pthread_join", "pthread_join failed\n"))
		goto done;

	CHECK(skel->bss->count != 0, "check_count",
	      "invalid non pthread file visit count %d\n", skel->bss->count);

done:
	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ	32768

static char taskbuf[TASKBUFSZ];

static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, len = 0, bufleft = TASKBUFSZ;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	do {
		len = read(iter_fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
	      "check for btf representation of task_struct in iter data",
	      "struct task_struct not found");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (CHECK(bss->tasks == 0, "check if iterated over tasks",
		  "no task iteration, did BPF program run?\n"))
		goto cleanup;

	CHECK(bss->seq_err != 0, "check for unexpected err",
	      "bpf_seq_printf_btf returned %ld", bss->seq_err);

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int err = -1, len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	err = strcmp(buf, expected);
	if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
		  buf, expected))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (CHECK(err, "bpf_iter_test_kern1__attach",
		  "skeleton attach failed\n")) {
		goto out;
	}

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}
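
/* Unlike the anonymous iterator above, test_file_iter() pins the iterator
 * link into bpffs so it can be opened and read like a regular file (e.g.
 * "cat /sys/fs/bpf/bpf_iter_test1"), then swaps the underlying program via
 * bpf_link__update_program() while the pinned path stays in place.
 */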

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file based iterator seems to work fine. Let us do a link
	 * update of the underlying link and `cat` the iterator again;
	 * its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
		  "skeleton open_and_load failed\n"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (CHECK(err, "update_prog", "update_prog failed\n"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (CHECK(!skel, "bpf_iter_test_kern4__open",
		  "skeleton open failed\n"))
		return;

	/* create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is for one map's output to almost
	 * fill the seq_file buffer, so that the other triggers an
	 * overflow and needs a restart.
	 */
	map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map1_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map2_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages: one map's
	 * bpf_seq_write mostly fills it, the other partially fills it
	 * and then triggers an overflow that forces a bpf_seq_read
	 * restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
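
	/* Worked example, assuming a 4KB page size (the actual value comes
	 * from sysconf() above): iter_size = 4096 << 3 = 32768 bytes.
	 * - e2big case: print_len = (32768 + 8) / 8 = 4097 u64 writes, i.e.
	 *   32776 bytes per map, which can never fit the 32768-byte buffer,
	 *   so the read is expected to fail with E2BIG.
	 * - normal case: print_len = (32768 - 8) / 8 = 4095 u64 writes, i.e.
	 *   32760 bytes per map; the first map almost fills the buffer and
	 *   the second map's output triggers an overflow plus restart, for
	 *   2 * 32760 = 65520 readable bytes in total.
	 * - ret1 case: each map emits a single u64, 2 * 8 = 16 bytes total.
	 */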

	skel->rodata->ret1 = ret1;

	if (CHECK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || len == -EAGAIN);

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (CHECK(total_read_len != expected_read_len, "read",
		  "total len %u, expected len %u\n", total_read_len,
		  expected_read_len))
		goto free_buf;

	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
		  "expected 1 actual %d\n", skel->bss->map1_accessed))
		goto free_buf;

	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
		  "expected 2 actual %d\n", skel->bss->map2_accessed))
		goto free_buf;

	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
	      "map2_seqnum", "two different seqnum %lld %lld\n",
	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
		  "skeleton open failed\n"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_hash_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(!IS_ERR(link), "attach_iter",
		  "attach_iter for hashmap2 unexpectedly succeeded\n"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(!IS_ERR(link), "attach_iter",
		  "attach_iter for hashmap3 unexpectedly succeeded\n"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_key_c += key.c;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum_a != expected_key_a,
		  "key_sum_a", "got %u expected %u\n",
		  skel->bss->key_sum_a, expected_key_a))
		goto close_iter;
	if (CHECK(skel->bss->key_sum_b != expected_key_b,
		  "key_sum_b", "got %u expected %u\n",
		  skel->bss->key_sum_b, expected_key_b))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %llu expected %llu\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}
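
/* The per-CPU map tests below build the user-space image of a per-CPU value
 * by hand: bpf_map_update_elem()/bpf_map_lookup_elem() on per-CPU maps
 * exchange one value per possible CPU, each padded to an 8-byte slot, which
 * is why the buffer is malloc(8 * bpf_num_possible_cpus()) and the value for
 * CPU j is written at offset j * 8.
 */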

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	val = malloc(8 * bpf_num_possible_cpus());

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
		  "skeleton open failed\n"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_percpu_hash_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_key_c += key.c;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum_a != expected_key_a,
		  "key_sum_a", "got %u expected %u\n",
		  skel->bss->key_sum_a, expected_key_a))
		goto close_iter;
	if (CHECK(skel->bss->key_sum_b != expected_key_b,
		  "key_sum_b", "got %u expected %u\n",
		  skel->bss->key_sum_b, expected_key_b))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 expected_key = 0, res_first_key;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	int err, i, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  " first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (CHECK(skel->bss->key_sum != expected_key,
		  "key_sum", "got %u expected %u\n",
		  skel->bss->key_sum, expected_key))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %llu expected %llu\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (CHECK(err, "map_lookup", "map_lookup failed\n"))
			goto close_iter;
		if (CHECK(i != val, "invalid_val",
			  "got value %llu expected %u\n", val, i))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	val = malloc(8 * bpf_num_possible_cpus());

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
		  "skeleton open failed\n"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_percpu_array_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum != expected_key,
		  "key_sum", "got %u expected %u\n",
		  skel->bss->key_sum, expected_key))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "map_update", "map_update failed\n"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket, this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;

	err = listen(sock_fd, 1);
	if (CHECK(err != 0, "listen", "errno: %d\n", errno))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
		  "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
		  getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
		  "ipv6_sk_count", "got %u expected %u\n",
		  skel->bss->ipv6_sk_count, num_sockets))
		goto close_iter;

	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n"))
		bpf_iter_test_kern6__destroy(skel);
}

void test_bpf_iter(void)
{
	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task"))
		test_task();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
}