tools/testing/selftests/bpf/prog_tests/tailcalls.c
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
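
/* For reference, a minimal sketch of what the BPF side (tailcall1.c, built
 * into tailcall1.o) is assumed to look like: a three-slot PROG_ARRAY named
 * jmp_table, three "classifier/<i>" programs that simply return their own
 * index, and a "classifier" entry program that tail-calls each slot in turn.
 * The actual selftest source may differ; the names below only mirror what
 * this test looks up.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 3);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("classifier/0")
 *	int bpf_func_0(struct __sk_buff *skb) { return 0; }
 *
 *	SEC("classifier/1")
 *	int bpf_func_1(struct __sk_buff *skb) { return 1; }
 *
 *	SEC("classifier/2")
 *	int bpf_func_2(struct __sk_buff *skb) { return 2; }
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		bpf_tail_call(skb, &jmp_table, 1);
 *		bpf_tail_call(skb, &jmp_table, 2);
 *		return 3;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 *
 * With a slot populated, the first successful bpf_tail_call() never returns
 * and the run reports the target program's index; with a slot deleted the
 * call falls through (nop), which is why an empty table yields retval 3.
 */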
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];
	char buff[128] = {};

	err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		j = bpf_map__def(prog_array)->max_entries - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		j = bpf_map__def(prog_array)->max_entries - 1 - i;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != j, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
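
/* A rough sketch of the assumed BPF side of tailcall2.o: the programs chain
 * through the same jmp_table rather than returning immediately, which is
 * what the retval expectations below (2, then 1, then 3) rely on. The real
 * object may contain additional programs for the limit-counter part; only
 * the chain relevant to these checks is shown.
 *
 *	SEC("classifier/0")
 *	int bpf_func_0(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 1);
 *		return 0;
 *	}
 *
 *	SEC("classifier/1")
 *	int bpf_func_1(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 2);
 *		return 1;
 *	}
 *
 *	SEC("classifier/2")
 *	int bpf_func_2(struct __sk_buff *skb)
 *	{
 *		return 2;
 *	}
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return 3;
 *	}
 */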
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];
	char buff[128] = {};

	err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations.
 */
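
/* A minimal sketch (an assumption, not the verbatim tailcall3.c) of the
 * counting scheme: a single-slot jmp_table whose program bumps a global
 * counter and then tail-calls itself until the kernel's tail call limit
 * cuts the chain off. The global lands in the object's .bss, which is why
 * the test reads it back through the internal "tailcall.bss" map and
 * expects 33.
 *
 *	int count;
 *
 *	SEC("classifier/0")
 *	int bpf_func_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return 0;
 *	}
 */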
static void test_tailcall_3(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char buff[128] = {};

	err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_title(obj, "classifier/0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known. The key is passed in via global
 * data so that we can select different targets and compare their return
 * values.
 */
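
/* The assumed shape of tailcall4.o (a sketch, not the verbatim source): the
 * tail call index comes from a global selector in .bss, so the key is not a
 * known constant and the JIT has to emit an indirect tail call. The test
 * below drives the selector through bpf_map_update_elem() on the internal
 * "tailcall.bss" map.
 *
 *	int selector;
 *
 *	SEC("classifier/0")
 *	int bpf_func_0(struct __sk_buff *skb) { return 0; }
 *	// ...and likewise for slots 1 and 2...
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, selector);
 *		return 3;
 *	}
 */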
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];

	err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ between
 * branches.
 */
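
/* A sketch of the assumed entry program in tailcall5.o: each branch picks a
 * constant index based on a global selector (set below to 1111, 1234 or
 * 5678), but the branches join before bpf_tail_call(), so the key is no
 * longer a single known constant and an indirect jump has to be emitted.
 *
 *	int selector;
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		int idx = 0;
 *
 *		if (selector == 1234)
 *			idx = 1;
 *		if (selector == 5678)
 *			idx = 2;
 *
 *		bpf_tail_call(skb, &jmp_table, idx);
 *		return 3;
 *	}
 */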
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];

	err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tailcalls work correctly in
 * combination with BPF subprograms.
 */
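
/* The pattern being exercised, sketched here from the checks below rather
 * than from the verbatim tailcall_bpf2bpf1.c: the tail calls are issued
 * from inside a __noinline BPF subprogram rather than from the entry
 * program itself. When a populated slot is hit, the tail-called program's
 * return value (1 or 0) is what test_run reports; once both slots are
 * emptied, the subprogram falls through, reads the ctx, and the entry
 * program's own return path is taken, yielding sizeof(pkt_v4) * 2.
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 1);
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return skb->len;		// reached only on a miss
 *	}
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		return subprog_tail(skb) * 2;	// caller must resume correctly
 *	}
 */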
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != 1, "tailcall",
	      "err %d errno %d retval %d\n", err, errno, retval);

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != sizeof(pkt_v4) * 2,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches with expectations when the tailcall is preceded by a
 * bpf2bpf call.
 */
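
/* Presumably the same counting scheme as sketched for tailcall3.o above,
 * except that the self tail call is issued from a __noinline subprogram,
 * e.g.:
 *
 *	int count;
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	SEC("classifier/0")
 *	int bpf_func_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		return subprog_tail(skb);
 *	}
 *
 * The limit accounting has to be identical, hence the same expected count
 * of 33.
 */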
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char buff[128] = {};

	err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_title(obj, "classifier/0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have the tailcalls
 * in them.
 */
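
/* A rough sketch of the idea (the actual tailcall_bpf2bpf3.c is more
 * involved): each subprogram claims a sizeable stack buffer around its
 * tail call, so the JIT has to set up and tear down a non-trivial stack
 * frame across the tail call.
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		volatile char buf[128] = {};	// force real stack usage
 *
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return skb->len + buf[0];
 *	}
 */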
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 3,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4),
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 2,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tailcalls combined with bpf2bpf calls. To make sure that
 * the tailcall counter behaves correctly, the bpf program will go through
 * the following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31, because the tailcall counter includes the first
 * two tailcalls whereas the global counter is incremented only on the loop
 * presented in the flow above.
 */
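
/* Sketch of the assumed program structure behind the flow above (the names
 * are illustrative): each bpf_funcN sits in jmp_table slot N and reaches
 * the next slot through a bpf2bpf subprogram, with subprog2 bumping the
 * global counter before looping back to slot 0.
 *
 *	int count;
 *
 *	static __noinline int subprog2(struct __sk_buff *skb)
 *	{
 *		count++;			// once per loop iteration
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return skb->len;
 *	}
 *
 *	SEC("classifier/2")
 *	int bpf_func_2(struct __sk_buff *skb)
 *	{
 *		return subprog2(skb);
 *	}
 */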
static void test_tailcall_bpf2bpf_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall",
	      "err %d errno %d retval %d\n", err, errno, retval);

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

out:
	bpf_object__close(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4();
}