/* Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include "libbpf.h"	/* bpf_create_map(), bpf_prog_load(), bpf_log_buf */

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
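/*
 * Each test case below is a small eBPF program plus the verdict the
 * verifier is expected to reach: the program either loads successfully
 * (ACCEPT) or is rejected with a verifier log containing the substring
 * in .errstr (REJECT).  Tests that call map helpers list in .fixup the
 * index of the BPF_LD_MAP_FD instruction whose immediate is patched
 * with a real map fd before loading.
 */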
#define MAX_INSNS 512

struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup[8];		/* insn indices whose imm is patched with the map fd */
	const char *errstr;	/* substring expected in the verifier log */
	enum { ACCEPT, REJECT } result;
};

static struct bpf_test tests[] = {
	{
		.insns = {
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_2, 3),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		.insns = {
		/* the second exit is never reached */
		BPF_EXIT_INSN(),
		BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_JMP_IMM(BPF_JA, 0, 0, -2),
		BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		.insns = {
		/* the +1 jump lands on the second half of the ld_imm64 pair */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		/* first half of an ld_imm64 without a matching second half */
		BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 0),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_LD_IMM64(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		.insns = {
		/* lone ld_imm64 first half followed by a non-zero insn */
		BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		.insns = {
		/* lone ld_imm64 first half as the last insn */
		BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		.insns = {
		/* no BPF_EXIT: execution falls off the end of the program */
		BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		.insns = {
		/* unconditional jump to itself */
		BPF_JMP_IMM(BPF_JA, 0, 0, -1),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
		/* unconditional back-edge to the first insn */
		BPF_JMP_IMM(BPF_JA, 0, 0, -4),
		BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
		/* a conditional back-edge is still a loop */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
		BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
185 "read uninitialized register",
187 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
190 .errstr
= "R2 !read_ok",
194 "read invalid register",
196 BPF_MOV64_REG(BPF_REG_0
, -1),
199 .errstr
= "R15 is invalid",
203 "program doesn't init R0 before exit",
205 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
208 .errstr
= "R0 !read_ok",
212 "program doesn't init R0 before exit in all branches",
214 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
215 BPF_MOV64_IMM(BPF_REG_0
, 1),
216 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
219 .errstr
= "R0 !read_ok",
223 "stack out of bounds",
225 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
228 .errstr
= "invalid stack",
232 "invalid call insn1",
234 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
237 .errstr
= "BPF_CALL uses reserved",
241 "invalid call insn2",
243 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
246 .errstr
= "BPF_CALL uses reserved",
250 "invalid function call",
252 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
255 .errstr
= "invalid func 1234567",
259 "uninitialized stack1",
261 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
262 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
263 BPF_LD_MAP_FD(BPF_REG_1
, 0),
264 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
268 .errstr
= "invalid indirect read from stack",
272 "uninitialized stack2",
274 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
275 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
278 .errstr
= "invalid read from stack",
282 "check valid spill/fill",
284 /* spill R1(ctx) into stack */
285 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
287 /* fill it back into R2 */
288 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
290 /* should be able to access R0 = *(R2 + 8) */
291 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, 8),
297 "check corrupted spill/fill",
299 /* spill R1(ctx) into stack */
300 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
302 /* mess up with R1 pointer on stack */
303 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
305 /* fill back into R0 should fail */
306 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
310 .errstr
= "corrupted spill",
314 "invalid src register in STX",
316 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
319 .errstr
= "R15 is invalid",
323 "invalid dst register in STX",
325 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
328 .errstr
= "R14 is invalid",
332 "invalid dst register in ST",
334 BPF_ST_MEM(BPF_B
, 14, -1, -1),
337 .errstr
= "R14 is invalid",
341 "invalid src register in LDX",
343 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
346 .errstr
= "R12 is invalid",
350 "invalid dst register in LDX",
352 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
355 .errstr
= "R11 is invalid",
	{
		.insns = {
		BPF_RAW_INSN(0, 0, 0, 0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_RAW_INSN(1, 0, 0, 0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_RAW_INSN(-1, 0, 0, 0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_RAW_INSN(-1, -1, -1, -1, -1),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		.insns = {
		BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
		BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
404 "misaligned read from stack",
406 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
407 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
410 .errstr
= "misaligned access",
414 "invalid map_fd for function call",
416 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
417 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
418 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
419 BPF_LD_MAP_FD(BPF_REG_1
, 0),
420 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_delete_elem
),
423 .errstr
= "fd 0 is not pointing to valid bpf_map",
427 "don't check return value before access",
429 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
430 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
431 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
432 BPF_LD_MAP_FD(BPF_REG_1
, 0),
433 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
434 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
438 .errstr
= "R0 invalid mem access 'map_value_or_null'",
442 "access memory with incorrect alignment",
444 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
445 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
446 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
447 BPF_LD_MAP_FD(BPF_REG_1
, 0),
448 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
449 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
450 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
454 .errstr
= "misaligned access",
458 "sometimes access memory with incorrect alignment",
460 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
461 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
462 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
463 BPF_LD_MAP_FD(BPF_REG_1
, 0),
464 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
465 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
466 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
468 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
472 .errstr
= "R0 invalid mem access",
	{
		.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 14),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 11),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 5),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 19),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
		BPF_JMP_IMM(BPF_JA, 0, 0, 15),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
		BPF_JMP_IMM(BPF_JA, 0, 0, 11),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
		BPF_JMP_IMM(BPF_JA, 0, 0, 7),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
		BPF_JMP_IMM(BPF_JA, 0, 0, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
		/* every path converges on the ld_map_fd at insn 24 */
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
		BPF_EXIT_INSN(),
		},
		.fixup = {24},
		.result = ACCEPT,
	},
	{
		.insns = {
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
};
static int probe_filter_length(struct bpf_insn *fp)
{
	int len;

	/* find the last non-zero instruction to determine the program length */
	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}
static int create_map(void)
{
	long long key, value = 0;
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 1024);
	if (map_fd < 0)
		printf("failed to create map '%s'\n", strerror(errno));

	return map_fd;
}
static int test(void)
{
	int prog_fd, i, pass_cnt = 0, err_cnt = 0;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int map_fd = -1;

		/* tests that call map helpers need a real map fd patched
		 * into their BPF_LD_MAP_FD instruction
		 */
		if (*fixup) {
			map_fd = create_map();
			prog[*fixup].imm = map_fd;
		}

		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
					prog_len * sizeof(struct bpf_insn),
					"GPL");

		if (tests[i].result == ACCEPT) {
			if (prog_fd < 0) {
				printf("FAIL\nfailed to load prog '%s'\n",
				       strerror(errno));
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		} else {
			if (prog_fd >= 0) {
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
			if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",
				       bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		}

		printf("OK\n");
		pass_cnt++;
fail:
		if (map_fd >= 0)
			close(map_fd);
		close(prog_fd);
	}
	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
	return 0;
}

int main(void)
{
	return test();
}