1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <network_helpers.h>
5 /* test_tailcall_1 checks basic functionality by patching multiple locations
6 * in a single program for a single tail call slot with nop->jmp, jmp->nop
7 * and jmp->jmp rewrites. Also checks for nop->nop.
9 static void test_tailcall_1(void)
11 int err
, map_fd
, prog_fd
, main_fd
, i
, j
;
12 struct bpf_map
*prog_array
;
13 struct bpf_program
*prog
;
14 struct bpf_object
*obj
;
15 __u32 retval
, duration
;
19 err
= bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS
, &obj
,
24 prog
= bpf_object__find_program_by_title(obj
, "classifier");
25 if (CHECK_FAIL(!prog
))
28 main_fd
= bpf_program__fd(prog
);
29 if (CHECK_FAIL(main_fd
< 0))
32 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
33 if (CHECK_FAIL(!prog_array
))
36 map_fd
= bpf_map__fd(prog_array
);
37 if (CHECK_FAIL(map_fd
< 0))
40 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
41 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
43 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
44 if (CHECK_FAIL(!prog
))
47 prog_fd
= bpf_program__fd(prog
);
48 if (CHECK_FAIL(prog_fd
< 0))
51 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
56 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
57 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
58 &duration
, &retval
, NULL
);
59 CHECK(err
|| retval
!= i
, "tailcall",
60 "err %d errno %d retval %d\n", err
, errno
, retval
);
62 err
= bpf_map_delete_elem(map_fd
, &i
);
67 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
68 &duration
, &retval
, NULL
);
69 CHECK(err
|| retval
!= 3, "tailcall", "err %d errno %d retval %d\n",
72 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
73 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
75 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
76 if (CHECK_FAIL(!prog
))
79 prog_fd
= bpf_program__fd(prog
);
80 if (CHECK_FAIL(prog_fd
< 0))
83 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
88 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
89 &duration
, &retval
, NULL
);
90 CHECK(err
|| retval
!= 0, "tailcall", "err %d errno %d retval %d\n",
93 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
94 j
= bpf_map__def(prog_array
)->max_entries
- 1 - i
;
95 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", j
);
97 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
98 if (CHECK_FAIL(!prog
))
101 prog_fd
= bpf_program__fd(prog
);
102 if (CHECK_FAIL(prog_fd
< 0))
105 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
110 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
111 j
= bpf_map__def(prog_array
)->max_entries
- 1 - i
;
113 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
114 &duration
, &retval
, NULL
);
115 CHECK(err
|| retval
!= j
, "tailcall",
116 "err %d errno %d retval %d\n", err
, errno
, retval
);
118 err
= bpf_map_delete_elem(map_fd
, &i
);
123 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
124 &duration
, &retval
, NULL
);
125 CHECK(err
|| retval
!= 3, "tailcall", "err %d errno %d retval %d\n",
128 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
129 err
= bpf_map_delete_elem(map_fd
, &i
);
130 if (CHECK_FAIL(err
>= 0 || errno
!= ENOENT
))
133 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
134 &duration
, &retval
, NULL
);
135 CHECK(err
|| retval
!= 3, "tailcall",
136 "err %d errno %d retval %d\n", err
, errno
, retval
);
140 bpf_object__close(obj
);
143 /* test_tailcall_2 checks that patching multiple programs for a single
144 * tail call slot works. It also jumps through several programs and tests
145 * the tail call limit counter.
147 static void test_tailcall_2(void)
149 int err
, map_fd
, prog_fd
, main_fd
, i
;
150 struct bpf_map
*prog_array
;
151 struct bpf_program
*prog
;
152 struct bpf_object
*obj
;
153 __u32 retval
, duration
;
157 err
= bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS
, &obj
,
162 prog
= bpf_object__find_program_by_title(obj
, "classifier");
163 if (CHECK_FAIL(!prog
))
166 main_fd
= bpf_program__fd(prog
);
167 if (CHECK_FAIL(main_fd
< 0))
170 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
171 if (CHECK_FAIL(!prog_array
))
174 map_fd
= bpf_map__fd(prog_array
);
175 if (CHECK_FAIL(map_fd
< 0))
178 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
179 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
181 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
182 if (CHECK_FAIL(!prog
))
185 prog_fd
= bpf_program__fd(prog
);
186 if (CHECK_FAIL(prog_fd
< 0))
189 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
194 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
195 &duration
, &retval
, NULL
);
196 CHECK(err
|| retval
!= 2, "tailcall", "err %d errno %d retval %d\n",
200 err
= bpf_map_delete_elem(map_fd
, &i
);
204 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
205 &duration
, &retval
, NULL
);
206 CHECK(err
|| retval
!= 1, "tailcall", "err %d errno %d retval %d\n",
210 err
= bpf_map_delete_elem(map_fd
, &i
);
214 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
215 &duration
, &retval
, NULL
);
216 CHECK(err
|| retval
!= 3, "tailcall", "err %d errno %d retval %d\n",
219 bpf_object__close(obj
);
222 /* test_tailcall_3 checks that the count value of the tail call limit
223 * enforcement matches with expectations.
225 static void test_tailcall_3(void)
227 int err
, map_fd
, prog_fd
, main_fd
, data_fd
, i
, val
;
228 struct bpf_map
*prog_array
, *data_map
;
229 struct bpf_program
*prog
;
230 struct bpf_object
*obj
;
231 __u32 retval
, duration
;
234 err
= bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS
, &obj
,
239 prog
= bpf_object__find_program_by_title(obj
, "classifier");
240 if (CHECK_FAIL(!prog
))
243 main_fd
= bpf_program__fd(prog
);
244 if (CHECK_FAIL(main_fd
< 0))
247 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
248 if (CHECK_FAIL(!prog_array
))
251 map_fd
= bpf_map__fd(prog_array
);
252 if (CHECK_FAIL(map_fd
< 0))
255 prog
= bpf_object__find_program_by_title(obj
, "classifier/0");
256 if (CHECK_FAIL(!prog
))
259 prog_fd
= bpf_program__fd(prog
);
260 if (CHECK_FAIL(prog_fd
< 0))
264 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
268 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
269 &duration
, &retval
, NULL
);
270 CHECK(err
|| retval
!= 1, "tailcall", "err %d errno %d retval %d\n",
273 data_map
= bpf_object__find_map_by_name(obj
, "tailcall.bss");
274 if (CHECK_FAIL(!data_map
|| !bpf_map__is_internal(data_map
)))
277 data_fd
= bpf_map__fd(data_map
);
278 if (CHECK_FAIL(map_fd
< 0))
282 err
= bpf_map_lookup_elem(data_fd
, &i
, &val
);
283 CHECK(err
|| val
!= 33, "tailcall count", "err %d errno %d count %d\n",
287 err
= bpf_map_delete_elem(map_fd
, &i
);
291 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
292 &duration
, &retval
, NULL
);
293 CHECK(err
|| retval
!= 0, "tailcall", "err %d errno %d retval %d\n",
296 bpf_object__close(obj
);
299 /* test_tailcall_4 checks that the kernel properly selects indirect jump
300 * for the case where the key is not known. Latter is passed via global
301 * data to select different targets we can compare return value of.
303 static void test_tailcall_4(void)
305 int err
, map_fd
, prog_fd
, main_fd
, data_fd
, i
;
306 struct bpf_map
*prog_array
, *data_map
;
307 struct bpf_program
*prog
;
308 struct bpf_object
*obj
;
309 __u32 retval
, duration
;
310 static const int zero
= 0;
314 err
= bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS
, &obj
,
319 prog
= bpf_object__find_program_by_title(obj
, "classifier");
320 if (CHECK_FAIL(!prog
))
323 main_fd
= bpf_program__fd(prog
);
324 if (CHECK_FAIL(main_fd
< 0))
327 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
328 if (CHECK_FAIL(!prog_array
))
331 map_fd
= bpf_map__fd(prog_array
);
332 if (CHECK_FAIL(map_fd
< 0))
335 data_map
= bpf_object__find_map_by_name(obj
, "tailcall.bss");
336 if (CHECK_FAIL(!data_map
|| !bpf_map__is_internal(data_map
)))
339 data_fd
= bpf_map__fd(data_map
);
340 if (CHECK_FAIL(map_fd
< 0))
343 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
344 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
346 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
347 if (CHECK_FAIL(!prog
))
350 prog_fd
= bpf_program__fd(prog
);
351 if (CHECK_FAIL(prog_fd
< 0))
354 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
359 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
360 err
= bpf_map_update_elem(data_fd
, &zero
, &i
, BPF_ANY
);
364 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
365 &duration
, &retval
, NULL
);
366 CHECK(err
|| retval
!= i
, "tailcall",
367 "err %d errno %d retval %d\n", err
, errno
, retval
);
370 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
371 err
= bpf_map_update_elem(data_fd
, &zero
, &i
, BPF_ANY
);
375 err
= bpf_map_delete_elem(map_fd
, &i
);
379 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
380 &duration
, &retval
, NULL
);
381 CHECK(err
|| retval
!= 3, "tailcall",
382 "err %d errno %d retval %d\n", err
, errno
, retval
);
385 bpf_object__close(obj
);
388 /* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
389 * an indirect jump when the keys are const but different from different branches.
391 static void test_tailcall_5(void)
393 int err
, map_fd
, prog_fd
, main_fd
, data_fd
, i
, key
[] = { 1111, 1234, 5678 };
394 struct bpf_map
*prog_array
, *data_map
;
395 struct bpf_program
*prog
;
396 struct bpf_object
*obj
;
397 __u32 retval
, duration
;
398 static const int zero
= 0;
402 err
= bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS
, &obj
,
407 prog
= bpf_object__find_program_by_title(obj
, "classifier");
408 if (CHECK_FAIL(!prog
))
411 main_fd
= bpf_program__fd(prog
);
412 if (CHECK_FAIL(main_fd
< 0))
415 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
416 if (CHECK_FAIL(!prog_array
))
419 map_fd
= bpf_map__fd(prog_array
);
420 if (CHECK_FAIL(map_fd
< 0))
423 data_map
= bpf_object__find_map_by_name(obj
, "tailcall.bss");
424 if (CHECK_FAIL(!data_map
|| !bpf_map__is_internal(data_map
)))
427 data_fd
= bpf_map__fd(data_map
);
428 if (CHECK_FAIL(map_fd
< 0))
431 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
432 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
434 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
435 if (CHECK_FAIL(!prog
))
438 prog_fd
= bpf_program__fd(prog
);
439 if (CHECK_FAIL(prog_fd
< 0))
442 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
447 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
448 err
= bpf_map_update_elem(data_fd
, &zero
, &key
[i
], BPF_ANY
);
452 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
453 &duration
, &retval
, NULL
);
454 CHECK(err
|| retval
!= i
, "tailcall",
455 "err %d errno %d retval %d\n", err
, errno
, retval
);
458 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
459 err
= bpf_map_update_elem(data_fd
, &zero
, &key
[i
], BPF_ANY
);
463 err
= bpf_map_delete_elem(map_fd
, &i
);
467 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
468 &duration
, &retval
, NULL
);
469 CHECK(err
|| retval
!= 3, "tailcall",
470 "err %d errno %d retval %d\n", err
, errno
, retval
);
473 bpf_object__close(obj
);
476 /* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working
477 * correctly in correlation with BPF subprograms
479 static void test_tailcall_bpf2bpf_1(void)
481 int err
, map_fd
, prog_fd
, main_fd
, i
;
482 struct bpf_map
*prog_array
;
483 struct bpf_program
*prog
;
484 struct bpf_object
*obj
;
485 __u32 retval
, duration
;
488 err
= bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS
,
493 prog
= bpf_object__find_program_by_title(obj
, "classifier");
494 if (CHECK_FAIL(!prog
))
497 main_fd
= bpf_program__fd(prog
);
498 if (CHECK_FAIL(main_fd
< 0))
501 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
502 if (CHECK_FAIL(!prog_array
))
505 map_fd
= bpf_map__fd(prog_array
);
506 if (CHECK_FAIL(map_fd
< 0))
510 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
511 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
513 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
514 if (CHECK_FAIL(!prog
))
517 prog_fd
= bpf_program__fd(prog
);
518 if (CHECK_FAIL(prog_fd
< 0))
521 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
526 err
= bpf_prog_test_run(main_fd
, 1, &pkt_v4
, sizeof(pkt_v4
), 0,
527 0, &retval
, &duration
);
528 CHECK(err
|| retval
!= 1, "tailcall",
529 "err %d errno %d retval %d\n", err
, errno
, retval
);
531 /* jmp -> nop, call subprog that will do tailcall */
533 err
= bpf_map_delete_elem(map_fd
, &i
);
537 err
= bpf_prog_test_run(main_fd
, 1, &pkt_v4
, sizeof(pkt_v4
), 0,
538 0, &retval
, &duration
);
539 CHECK(err
|| retval
!= 0, "tailcall", "err %d errno %d retval %d\n",
542 /* make sure that subprog can access ctx and entry prog that
543 * called this subprog can properly return
546 err
= bpf_map_delete_elem(map_fd
, &i
);
550 err
= bpf_prog_test_run(main_fd
, 1, &pkt_v4
, sizeof(pkt_v4
), 0,
551 0, &retval
, &duration
);
552 CHECK(err
|| retval
!= sizeof(pkt_v4
) * 2,
553 "tailcall", "err %d errno %d retval %d\n",
556 bpf_object__close(obj
);
559 /* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
560 * enforcement matches with expectations when tailcall is preceded with
563 static void test_tailcall_bpf2bpf_2(void)
565 int err
, map_fd
, prog_fd
, main_fd
, data_fd
, i
, val
;
566 struct bpf_map
*prog_array
, *data_map
;
567 struct bpf_program
*prog
;
568 struct bpf_object
*obj
;
569 __u32 retval
, duration
;
572 err
= bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS
,
577 prog
= bpf_object__find_program_by_title(obj
, "classifier");
578 if (CHECK_FAIL(!prog
))
581 main_fd
= bpf_program__fd(prog
);
582 if (CHECK_FAIL(main_fd
< 0))
585 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
586 if (CHECK_FAIL(!prog_array
))
589 map_fd
= bpf_map__fd(prog_array
);
590 if (CHECK_FAIL(map_fd
< 0))
593 prog
= bpf_object__find_program_by_title(obj
, "classifier/0");
594 if (CHECK_FAIL(!prog
))
597 prog_fd
= bpf_program__fd(prog
);
598 if (CHECK_FAIL(prog_fd
< 0))
602 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
606 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
607 &duration
, &retval
, NULL
);
608 CHECK(err
|| retval
!= 1, "tailcall", "err %d errno %d retval %d\n",
611 data_map
= bpf_object__find_map_by_name(obj
, "tailcall.bss");
612 if (CHECK_FAIL(!data_map
|| !bpf_map__is_internal(data_map
)))
615 data_fd
= bpf_map__fd(data_map
);
616 if (CHECK_FAIL(map_fd
< 0))
620 err
= bpf_map_lookup_elem(data_fd
, &i
, &val
);
621 CHECK(err
|| val
!= 33, "tailcall count", "err %d errno %d count %d\n",
625 err
= bpf_map_delete_elem(map_fd
, &i
);
629 err
= bpf_prog_test_run(main_fd
, 1, buff
, sizeof(buff
), 0,
630 &duration
, &retval
, NULL
);
631 CHECK(err
|| retval
!= 0, "tailcall", "err %d errno %d retval %d\n",
634 bpf_object__close(obj
);
637 /* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to
638 * 256 bytes) can be used within bpf subprograms that have the tailcalls
641 static void test_tailcall_bpf2bpf_3(void)
643 int err
, map_fd
, prog_fd
, main_fd
, i
;
644 struct bpf_map
*prog_array
;
645 struct bpf_program
*prog
;
646 struct bpf_object
*obj
;
647 __u32 retval
, duration
;
650 err
= bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS
,
655 prog
= bpf_object__find_program_by_title(obj
, "classifier");
656 if (CHECK_FAIL(!prog
))
659 main_fd
= bpf_program__fd(prog
);
660 if (CHECK_FAIL(main_fd
< 0))
663 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
664 if (CHECK_FAIL(!prog_array
))
667 map_fd
= bpf_map__fd(prog_array
);
668 if (CHECK_FAIL(map_fd
< 0))
671 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
672 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
674 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
675 if (CHECK_FAIL(!prog
))
678 prog_fd
= bpf_program__fd(prog
);
679 if (CHECK_FAIL(prog_fd
< 0))
682 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
687 err
= bpf_prog_test_run(main_fd
, 1, &pkt_v4
, sizeof(pkt_v4
), 0,
688 &duration
, &retval
, NULL
);
689 CHECK(err
|| retval
!= sizeof(pkt_v4
) * 3,
690 "tailcall", "err %d errno %d retval %d\n",
694 err
= bpf_map_delete_elem(map_fd
, &i
);
698 err
= bpf_prog_test_run(main_fd
, 1, &pkt_v4
, sizeof(pkt_v4
), 0,
699 &duration
, &retval
, NULL
);
700 CHECK(err
|| retval
!= sizeof(pkt_v4
),
701 "tailcall", "err %d errno %d retval %d\n",
705 err
= bpf_map_delete_elem(map_fd
, &i
);
709 err
= bpf_prog_test_run(main_fd
, 1, &pkt_v4
, sizeof(pkt_v4
), 0,
710 &duration
, &retval
, NULL
);
711 CHECK(err
|| retval
!= sizeof(pkt_v4
) * 2,
712 "tailcall", "err %d errno %d retval %d\n",
715 bpf_object__close(obj
);
718 /* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
719 * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
720 * counter behaves correctly, bpf program will go through following flow:
722 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
723 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
724 * subprog2 [here bump global counter] --------^
726 * We go through first two tailcalls and start counting from the subprog2 where
727 * the loop begins. At the end of the test make sure that the global counter is
728 * equal to 31, because tailcall counter includes the first two tailcalls
729 * whereas global counter is incremented only on loop presented on flow above.
731 static void test_tailcall_bpf2bpf_4(void)
733 int err
, map_fd
, prog_fd
, main_fd
, data_fd
, i
, val
;
734 struct bpf_map
*prog_array
, *data_map
;
735 struct bpf_program
*prog
;
736 struct bpf_object
*obj
;
737 __u32 retval
, duration
;
740 err
= bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS
,
745 prog
= bpf_object__find_program_by_title(obj
, "classifier");
746 if (CHECK_FAIL(!prog
))
749 main_fd
= bpf_program__fd(prog
);
750 if (CHECK_FAIL(main_fd
< 0))
753 prog_array
= bpf_object__find_map_by_name(obj
, "jmp_table");
754 if (CHECK_FAIL(!prog_array
))
757 map_fd
= bpf_map__fd(prog_array
);
758 if (CHECK_FAIL(map_fd
< 0))
761 for (i
= 0; i
< bpf_map__def(prog_array
)->max_entries
; i
++) {
762 snprintf(prog_name
, sizeof(prog_name
), "classifier/%i", i
);
764 prog
= bpf_object__find_program_by_title(obj
, prog_name
);
765 if (CHECK_FAIL(!prog
))
768 prog_fd
= bpf_program__fd(prog
);
769 if (CHECK_FAIL(prog_fd
< 0))
772 err
= bpf_map_update_elem(map_fd
, &i
, &prog_fd
, BPF_ANY
);
777 err
= bpf_prog_test_run(main_fd
, 1, &pkt_v4
, sizeof(pkt_v4
), 0,
778 &duration
, &retval
, NULL
);
779 CHECK(err
|| retval
!= sizeof(pkt_v4
) * 3, "tailcall", "err %d errno %d retval %d\n",
782 data_map
= bpf_object__find_map_by_name(obj
, "tailcall.bss");
783 if (CHECK_FAIL(!data_map
|| !bpf_map__is_internal(data_map
)))
786 data_fd
= bpf_map__fd(data_map
);
787 if (CHECK_FAIL(map_fd
< 0))
791 err
= bpf_map_lookup_elem(data_fd
, &i
, &val
);
792 CHECK(err
|| val
!= 31, "tailcall count", "err %d errno %d count %d\n",
796 bpf_object__close(obj
);
/* Test entry point: runs each tail call scenario as its own subtest so
 * failures are reported independently.
 */
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4();
}