/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <asm/types.h>
#include <linux/types.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

# include "autoconf.h"
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif

#include "../../../include/linux/filter.h"

# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
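/* Per-test flags: F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose
 * expected result assumes CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, while
 * F_LOAD_WITH_STRICT_ALIGNMENT makes the runner load the program with strict
 * alignment checking enabled so misaligned accesses are rejected even on
 * such architectures.
 */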
struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};
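/* The fixup_* arrays hold instruction indices whose immediate is patched by
 * the test runner before loading, typically with the fd of a map it creates
 * for the test (fixup_prog with a prog-array for tail calls, fixup_map_in_map
 * with a map-in-map).  errstr/errstr_unpriv are substrings expected in the
 * verifier log when the load is supposed to be rejected.
 */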
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11
static struct bpf_test tests[] = {
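	/* Each entry below is one verifier test: a short BPF program written as
	 * raw instruction macros, plus the outcome (and log substring) expected
	 * when it is loaded by a privileged and by an unprivileged user.
	 */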
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),

		"DIV32 by 0, zero check 1",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),

		"DIV32 by 0, zero check 2",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),

		"DIV64 by 0, zero check",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),

		"MOD32 by 0, zero check 1",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),

		"MOD32 by 0, zero check 2",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),

		"MOD64 by 0, zero check",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),

		"DIV32 by 0, zero check ok, cls",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 2),
			BPF_MOV32_IMM(BPF_REG_2, 16),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"DIV32 by 0, zero check 1, cls",
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"DIV32 by 0, zero check 2, cls",
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"DIV64 by 0, zero check, cls",
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD32 by 0, zero check ok, cls",
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 3),
			BPF_MOV32_IMM(BPF_REG_2, 5),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD32 by 0, zero check 1, cls",
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD32 by 0, zero check 2, cls",
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD64 by 0, zero check 1, cls",
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD64 by 0, zero check 2, cls",
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, -1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	/* Just make sure that JITs used udiv/umod as otherwise we get
	 * an exception from INT_MIN/-1 overflow similarly as with div
	 * by zero.
	 */
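	/* Note: BPF_DIV and BPF_MOD are unsigned in eBPF, so a JIT emitting a
	 * signed divide here would fault on INT_MIN / -1 (e.g. #DE on x86),
	 * while the unsigned operation is well defined.
	 */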
		"DIV32 overflow, check 1",
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"DIV32 overflow, check 2",
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"DIV64 overflow, check 1",
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"DIV64 overflow, check 2",
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD32 overflow, check 1",
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD32 overflow, check 2",
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD64 overflow, check 1",
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"MOD64 overflow, check 2",
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"xor32 zero extend check",
			BPF_MOV32_IMM(BPF_REG_2, -1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "unknown opcode 00",

		.errstr = "R0 !read_ok",

		.errstr = "unreachable",

			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		.errstr = "unreachable",

			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		.errstr = "jump out of range",

		"out of range jump2",
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
		.errstr = "jump out of range",

			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",

			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",

			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
		.errstr = "invalid bpf_ld_imm64 insn",

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		.errstr = "invalid bpf_ld_imm64 insn",

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		.errstr = "invalid bpf_ld_imm64 insn",

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
		.errstr = "uses reserved fields",

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
		.errstr = "invalid bpf_ld_imm64 insn",

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
		.errstr = "invalid bpf_ld_imm64 insn",

			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
		.errstr = "invalid bpf_ld_imm64 insn",
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
		.errstr = "not pointing to valid bpf_map",

			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
		.errstr = "invalid bpf_ld_imm64 insn",

			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
		.errstr = "unknown opcode c4",

			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
		.errstr = "unknown opcode cc",

			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),

			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),

			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		.errstr = "not an exit",

			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
		.errstr = "back-edge",

			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
		.errstr = "back-edge",

			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
		.errstr = "back-edge",
		"read uninitialized register",
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		.errstr = "R2 !read_ok",

		"read invalid register",
			BPF_MOV64_REG(BPF_REG_0, -1),
		.errstr = "R15 is invalid",

		"program doesn't init R0 before exit",
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
		.errstr = "R0 !read_ok",

		"program doesn't init R0 before exit in all branches",
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",

		"stack out of bounds",
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
		.errstr = "invalid stack",

		"invalid call insn1",
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
		.errstr = "unknown opcode 8d",

		"invalid call insn2",
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
		.errstr = "BPF_CALL uses reserved",

		"invalid function call",
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
		.errstr = "invalid func unknown#1234567",

		"uninitialized stack1",
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
		.errstr = "invalid indirect read from stack",

		"uninitialized stack2",
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
		.errstr = "invalid read from stack",
		"invalid fp arithmetic",
		/* If this ever gets changed, make sure JITs can deal with it. */
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		.errstr = "R1 subtraction from stack pointer",
		"non-invalid fp arithmetic",
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),

		"invalid argument register",
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
		.errstr = "R1 !read_ok",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,

		"non-invalid argument register",
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		"check valid spill/fill",
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		.errstr_unpriv = "R0 leaks addr",
		.result_unpriv = REJECT,
		.retval = POINTER_VALUE,

		"check valid spill/fill, skb mark",
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
		.result_unpriv = ACCEPT,

		"check corrupted spill/fill",
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		"invalid src register in STX",
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
		.errstr = "R15 is invalid",

		"invalid dst register in STX",
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
		.errstr = "R14 is invalid",

		"invalid dst register in ST",
			BPF_ST_MEM(BPF_B, 14, -1, -1),
		.errstr = "R14 is invalid",

		"invalid src register in LDX",
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
		.errstr = "R12 is invalid",

		"invalid dst register in LDX",
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
		.errstr = "R11 is invalid",

			BPF_RAW_INSN(0, 0, 0, 0, 0),
		.errstr = "unknown opcode 00",

			BPF_RAW_INSN(1, 0, 0, 0, 0),
		.errstr = "BPF_LDX uses reserved fields",

			BPF_RAW_INSN(-1, 0, 0, 0, 0),
		.errstr = "unknown opcode ff",

			BPF_RAW_INSN(-1, -1, -1, -1, -1),
		.errstr = "unknown opcode ff",

			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
		.errstr = "BPF_ALU uses reserved fields",
		"misaligned read from stack",
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
		.errstr = "misaligned stack access",

		"invalid map_fd for function call",
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
		.errstr = "fd 0 is not pointing to valid bpf_map",

		"don't check return value before access",
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",

		"access memory with incorrect alignment",
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
		.fixup_map1 = { 3 },
		.errstr = "misaligned value access",
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
		"sometimes access memory with incorrect alignment",
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
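	/* The next few entries chain long sequences of conditional branches and
	 * stack stores; they mainly exercise how the verifier walks every
	 * branch path rather than any single instruction.
	 */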
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
1227 "access skb fields ok",
1229 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1230 offsetof(struct __sk_buff
, len
)),
1231 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1232 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1233 offsetof(struct __sk_buff
, mark
)),
1234 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1235 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1236 offsetof(struct __sk_buff
, pkt_type
)),
1237 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1238 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1239 offsetof(struct __sk_buff
, queue_mapping
)),
1240 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
1241 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1242 offsetof(struct __sk_buff
, protocol
)),
1243 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
1244 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1245 offsetof(struct __sk_buff
, vlan_present
)),
1246 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
1247 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1248 offsetof(struct __sk_buff
, vlan_tci
)),
1249 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
1250 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1251 offsetof(struct __sk_buff
, napi_id
)),
1252 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
1258 "access skb fields bad1",
1260 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -4),
1263 .errstr
= "invalid bpf_context access",
1267 "access skb fields bad2",
1269 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 9),
1270 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1271 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1272 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1273 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1274 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1275 BPF_FUNC_map_lookup_elem
),
1276 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
1278 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
1279 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1280 offsetof(struct __sk_buff
, pkt_type
)),
1283 .fixup_map1
= { 4 },
1284 .errstr
= "different pointers",
1285 .errstr_unpriv
= "R1 pointer comparison",
1289 "access skb fields bad3",
1291 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
1292 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1293 offsetof(struct __sk_buff
, pkt_type
)),
1295 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1296 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1297 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1298 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1299 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1300 BPF_FUNC_map_lookup_elem
),
1301 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
1303 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
1304 BPF_JMP_IMM(BPF_JA
, 0, 0, -12),
1306 .fixup_map1
= { 6 },
1307 .errstr
= "different pointers",
1308 .errstr_unpriv
= "R1 pointer comparison",
1312 "access skb fields bad4",
1314 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 3),
1315 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1316 offsetof(struct __sk_buff
, len
)),
1317 BPF_MOV64_IMM(BPF_REG_0
, 0),
1319 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1320 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1321 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1322 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1323 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1324 BPF_FUNC_map_lookup_elem
),
1325 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
1327 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
1328 BPF_JMP_IMM(BPF_JA
, 0, 0, -13),
1330 .fixup_map1
= { 7 },
1331 .errstr
= "different pointers",
1332 .errstr_unpriv
= "R1 pointer comparison",
1336 "invalid access __sk_buff family",
1338 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1339 offsetof(struct __sk_buff
, family
)),
1342 .errstr
= "invalid bpf_context access",
1346 "invalid access __sk_buff remote_ip4",
1348 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1349 offsetof(struct __sk_buff
, remote_ip4
)),
1352 .errstr
= "invalid bpf_context access",
1356 "invalid access __sk_buff local_ip4",
1358 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1359 offsetof(struct __sk_buff
, local_ip4
)),
1362 .errstr
= "invalid bpf_context access",
1366 "invalid access __sk_buff remote_ip6",
1368 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1369 offsetof(struct __sk_buff
, remote_ip6
)),
1372 .errstr
= "invalid bpf_context access",
1376 "invalid access __sk_buff local_ip6",
1378 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1379 offsetof(struct __sk_buff
, local_ip6
)),
1382 .errstr
= "invalid bpf_context access",
1386 "invalid access __sk_buff remote_port",
1388 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1389 offsetof(struct __sk_buff
, remote_port
)),
1392 .errstr
= "invalid bpf_context access",
1396 "invalid access __sk_buff remote_port",
1398 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1399 offsetof(struct __sk_buff
, local_port
)),
1402 .errstr
= "invalid bpf_context access",
1406 "valid access __sk_buff family",
1408 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1409 offsetof(struct __sk_buff
, family
)),
1413 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1416 "valid access __sk_buff remote_ip4",
1418 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1419 offsetof(struct __sk_buff
, remote_ip4
)),
1423 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1426 "valid access __sk_buff local_ip4",
1428 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1429 offsetof(struct __sk_buff
, local_ip4
)),
1433 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1436 "valid access __sk_buff remote_ip6",
1438 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1439 offsetof(struct __sk_buff
, remote_ip6
[0])),
1440 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1441 offsetof(struct __sk_buff
, remote_ip6
[1])),
1442 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1443 offsetof(struct __sk_buff
, remote_ip6
[2])),
1444 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1445 offsetof(struct __sk_buff
, remote_ip6
[3])),
1449 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1452 "valid access __sk_buff local_ip6",
1454 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1455 offsetof(struct __sk_buff
, local_ip6
[0])),
1456 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1457 offsetof(struct __sk_buff
, local_ip6
[1])),
1458 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1459 offsetof(struct __sk_buff
, local_ip6
[2])),
1460 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1461 offsetof(struct __sk_buff
, local_ip6
[3])),
1465 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1468 "valid access __sk_buff remote_port",
1470 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1471 offsetof(struct __sk_buff
, remote_port
)),
1475 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1478 "valid access __sk_buff remote_port",
1480 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1481 offsetof(struct __sk_buff
, local_port
)),
1485 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1488 "invalid access of tc_classid for SK_SKB",
1490 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1491 offsetof(struct __sk_buff
, tc_classid
)),
1495 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1496 .errstr
= "invalid bpf_context access",
1499 "invalid access of skb->mark for SK_SKB",
1501 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1502 offsetof(struct __sk_buff
, mark
)),
1506 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1507 .errstr
= "invalid bpf_context access",
1510 "check skb->mark is not writeable by SK_SKB",
1512 BPF_MOV64_IMM(BPF_REG_0
, 0),
1513 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1514 offsetof(struct __sk_buff
, mark
)),
1518 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1519 .errstr
= "invalid bpf_context access",
1522 "check skb->tc_index is writeable by SK_SKB",
1524 BPF_MOV64_IMM(BPF_REG_0
, 0),
1525 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1526 offsetof(struct __sk_buff
, tc_index
)),
1530 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1533 "check skb->priority is writeable by SK_SKB",
1535 BPF_MOV64_IMM(BPF_REG_0
, 0),
1536 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1537 offsetof(struct __sk_buff
, priority
)),
1541 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1544 "direct packet read for SK_SKB",
1546 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1547 offsetof(struct __sk_buff
, data
)),
1548 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1549 offsetof(struct __sk_buff
, data_end
)),
1550 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1551 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1552 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1553 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
1554 BPF_MOV64_IMM(BPF_REG_0
, 0),
1558 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1561 "direct packet write for SK_SKB",
1563 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1564 offsetof(struct __sk_buff
, data
)),
1565 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1566 offsetof(struct __sk_buff
, data_end
)),
1567 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1568 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1569 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1570 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
1571 BPF_MOV64_IMM(BPF_REG_0
, 0),
1575 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1578 "overlapping checks for direct packet access SK_SKB",
1580 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1581 offsetof(struct __sk_buff
, data
)),
1582 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1583 offsetof(struct __sk_buff
, data_end
)),
1584 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1585 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1586 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
1587 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
1588 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
1589 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
1590 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
1591 BPF_MOV64_IMM(BPF_REG_0
, 0),
1595 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1598 "check skb->mark is not writeable by sockets",
1600 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1601 offsetof(struct __sk_buff
, mark
)),
1604 .errstr
= "invalid bpf_context access",
1605 .errstr_unpriv
= "R1 leaks addr",
1609 "check skb->tc_index is not writeable by sockets",
1611 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1612 offsetof(struct __sk_buff
, tc_index
)),
1615 .errstr
= "invalid bpf_context access",
1616 .errstr_unpriv
= "R1 leaks addr",
1620 "check cb access: byte",
1622 BPF_MOV64_IMM(BPF_REG_0
, 0),
1623 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1624 offsetof(struct __sk_buff
, cb
[0])),
1625 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1626 offsetof(struct __sk_buff
, cb
[0]) + 1),
1627 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1628 offsetof(struct __sk_buff
, cb
[0]) + 2),
1629 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1630 offsetof(struct __sk_buff
, cb
[0]) + 3),
1631 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1632 offsetof(struct __sk_buff
, cb
[1])),
1633 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1634 offsetof(struct __sk_buff
, cb
[1]) + 1),
1635 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1636 offsetof(struct __sk_buff
, cb
[1]) + 2),
1637 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1638 offsetof(struct __sk_buff
, cb
[1]) + 3),
1639 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1640 offsetof(struct __sk_buff
, cb
[2])),
1641 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1642 offsetof(struct __sk_buff
, cb
[2]) + 1),
1643 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1644 offsetof(struct __sk_buff
, cb
[2]) + 2),
1645 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1646 offsetof(struct __sk_buff
, cb
[2]) + 3),
1647 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1648 offsetof(struct __sk_buff
, cb
[3])),
1649 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1650 offsetof(struct __sk_buff
, cb
[3]) + 1),
1651 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1652 offsetof(struct __sk_buff
, cb
[3]) + 2),
1653 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1654 offsetof(struct __sk_buff
, cb
[3]) + 3),
1655 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1656 offsetof(struct __sk_buff
, cb
[4])),
1657 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1658 offsetof(struct __sk_buff
, cb
[4]) + 1),
1659 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1660 offsetof(struct __sk_buff
, cb
[4]) + 2),
1661 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1662 offsetof(struct __sk_buff
, cb
[4]) + 3),
1663 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1664 offsetof(struct __sk_buff
, cb
[0])),
1665 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1666 offsetof(struct __sk_buff
, cb
[0]) + 1),
1667 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1668 offsetof(struct __sk_buff
, cb
[0]) + 2),
1669 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1670 offsetof(struct __sk_buff
, cb
[0]) + 3),
1671 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1672 offsetof(struct __sk_buff
, cb
[1])),
1673 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1674 offsetof(struct __sk_buff
, cb
[1]) + 1),
1675 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1676 offsetof(struct __sk_buff
, cb
[1]) + 2),
1677 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1678 offsetof(struct __sk_buff
, cb
[1]) + 3),
1679 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1680 offsetof(struct __sk_buff
, cb
[2])),
1681 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1682 offsetof(struct __sk_buff
, cb
[2]) + 1),
1683 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1684 offsetof(struct __sk_buff
, cb
[2]) + 2),
1685 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1686 offsetof(struct __sk_buff
, cb
[2]) + 3),
1687 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1688 offsetof(struct __sk_buff
, cb
[3])),
1689 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1690 offsetof(struct __sk_buff
, cb
[3]) + 1),
1691 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1692 offsetof(struct __sk_buff
, cb
[3]) + 2),
1693 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1694 offsetof(struct __sk_buff
, cb
[3]) + 3),
1695 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1696 offsetof(struct __sk_buff
, cb
[4])),
1697 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1698 offsetof(struct __sk_buff
, cb
[4]) + 1),
1699 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1700 offsetof(struct __sk_buff
, cb
[4]) + 2),
1701 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1702 offsetof(struct __sk_buff
, cb
[4]) + 3),
1708 "__sk_buff->hash, offset 0, byte store not permitted",
1710 BPF_MOV64_IMM(BPF_REG_0
, 0),
1711 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1712 offsetof(struct __sk_buff
, hash
)),
1715 .errstr
= "invalid bpf_context access",
1719 "__sk_buff->tc_index, offset 3, byte store not permitted",
1721 BPF_MOV64_IMM(BPF_REG_0
, 0),
1722 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1723 offsetof(struct __sk_buff
, tc_index
) + 3),
1726 .errstr
= "invalid bpf_context access",
1730 "check skb->hash byte load permitted",
1732 BPF_MOV64_IMM(BPF_REG_0
, 0),
1733 #if __BYTE_ORDER == __LITTLE_ENDIAN
1734 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1735 offsetof(struct __sk_buff
, hash
)),
1737 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1738 offsetof(struct __sk_buff
, hash
) + 3),
1745 "check skb->hash byte load not permitted 1",
1747 BPF_MOV64_IMM(BPF_REG_0
, 0),
1748 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1749 offsetof(struct __sk_buff
, hash
) + 1),
1752 .errstr
= "invalid bpf_context access",
1756 "check skb->hash byte load not permitted 2",
1758 BPF_MOV64_IMM(BPF_REG_0
, 0),
1759 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1760 offsetof(struct __sk_buff
, hash
) + 2),
1763 .errstr
= "invalid bpf_context access",
1767 "check skb->hash byte load not permitted 3",
1769 BPF_MOV64_IMM(BPF_REG_0
, 0),
1770 #if __BYTE_ORDER == __LITTLE_ENDIAN
1771 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1772 offsetof(struct __sk_buff
, hash
) + 3),
1774 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1775 offsetof(struct __sk_buff
, hash
)),
1779 .errstr
= "invalid bpf_context access",
1783 "check cb access: byte, wrong type",
1785 BPF_MOV64_IMM(BPF_REG_0
, 0),
1786 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1787 offsetof(struct __sk_buff
, cb
[0])),
1790 .errstr
= "invalid bpf_context access",
1792 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1795 "check cb access: half",
1797 BPF_MOV64_IMM(BPF_REG_0
, 0),
1798 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1799 offsetof(struct __sk_buff
, cb
[0])),
1800 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1801 offsetof(struct __sk_buff
, cb
[0]) + 2),
1802 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1803 offsetof(struct __sk_buff
, cb
[1])),
1804 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1805 offsetof(struct __sk_buff
, cb
[1]) + 2),
1806 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1807 offsetof(struct __sk_buff
, cb
[2])),
1808 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1809 offsetof(struct __sk_buff
, cb
[2]) + 2),
1810 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1811 offsetof(struct __sk_buff
, cb
[3])),
1812 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1813 offsetof(struct __sk_buff
, cb
[3]) + 2),
1814 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1815 offsetof(struct __sk_buff
, cb
[4])),
1816 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1817 offsetof(struct __sk_buff
, cb
[4]) + 2),
1818 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1819 offsetof(struct __sk_buff
, cb
[0])),
1820 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1821 offsetof(struct __sk_buff
, cb
[0]) + 2),
1822 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1823 offsetof(struct __sk_buff
, cb
[1])),
1824 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1825 offsetof(struct __sk_buff
, cb
[1]) + 2),
1826 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1827 offsetof(struct __sk_buff
, cb
[2])),
1828 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1829 offsetof(struct __sk_buff
, cb
[2]) + 2),
1830 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1831 offsetof(struct __sk_buff
, cb
[3])),
1832 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1833 offsetof(struct __sk_buff
, cb
[3]) + 2),
1834 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1835 offsetof(struct __sk_buff
, cb
[4])),
1836 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1837 offsetof(struct __sk_buff
, cb
[4]) + 2),
1843 "check cb access: half, unaligned",
1845 BPF_MOV64_IMM(BPF_REG_0
, 0),
1846 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1847 offsetof(struct __sk_buff
, cb
[0]) + 1),
1850 .errstr
= "misaligned context access",
1852 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1855 "check __sk_buff->hash, offset 0, half store not permitted",
1857 BPF_MOV64_IMM(BPF_REG_0
, 0),
1858 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1859 offsetof(struct __sk_buff
, hash
)),
1862 .errstr
= "invalid bpf_context access",
1866 "check __sk_buff->tc_index, offset 2, half store not permitted",
1868 BPF_MOV64_IMM(BPF_REG_0
, 0),
1869 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1870 offsetof(struct __sk_buff
, tc_index
) + 2),
1873 .errstr
= "invalid bpf_context access",
1877 "check skb->hash half load permitted",
1879 BPF_MOV64_IMM(BPF_REG_0
, 0),
1880 #if __BYTE_ORDER == __LITTLE_ENDIAN
1881 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1882 offsetof(struct __sk_buff
, hash
)),
1884 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1885 offsetof(struct __sk_buff
, hash
) + 2),
1892 "check skb->hash half load not permitted",
1894 BPF_MOV64_IMM(BPF_REG_0
, 0),
1895 #if __BYTE_ORDER == __LITTLE_ENDIAN
1896 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1897 offsetof(struct __sk_buff
, hash
) + 2),
1899 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1900 offsetof(struct __sk_buff
, hash
)),
1904 .errstr
= "invalid bpf_context access",
1908 "check cb access: half, wrong type",
1910 BPF_MOV64_IMM(BPF_REG_0
, 0),
1911 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1912 offsetof(struct __sk_buff
, cb
[0])),
1915 .errstr
= "invalid bpf_context access",
1917 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1920 "check cb access: word",
1922 BPF_MOV64_IMM(BPF_REG_0
, 0),
1923 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1924 offsetof(struct __sk_buff
, cb
[0])),
1925 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1926 offsetof(struct __sk_buff
, cb
[1])),
1927 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1928 offsetof(struct __sk_buff
, cb
[2])),
1929 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1930 offsetof(struct __sk_buff
, cb
[3])),
1931 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1932 offsetof(struct __sk_buff
, cb
[4])),
1933 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1934 offsetof(struct __sk_buff
, cb
[0])),
1935 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1936 offsetof(struct __sk_buff
, cb
[1])),
1937 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1938 offsetof(struct __sk_buff
, cb
[2])),
1939 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1940 offsetof(struct __sk_buff
, cb
[3])),
1941 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1942 offsetof(struct __sk_buff
, cb
[4])),
1948 "check cb access: word, unaligned 1",
1950 BPF_MOV64_IMM(BPF_REG_0
, 0),
1951 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1952 offsetof(struct __sk_buff
, cb
[0]) + 2),
1955 .errstr
= "misaligned context access",
1957 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1960 "check cb access: word, unaligned 2",
1962 BPF_MOV64_IMM(BPF_REG_0
, 0),
1963 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1964 offsetof(struct __sk_buff
, cb
[4]) + 1),
1967 .errstr
= "misaligned context access",
1969 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1972 "check cb access: word, unaligned 3",
1974 BPF_MOV64_IMM(BPF_REG_0
, 0),
1975 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1976 offsetof(struct __sk_buff
, cb
[4]) + 2),
1979 .errstr
= "misaligned context access",
1981 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1984 "check cb access: word, unaligned 4",
1986 BPF_MOV64_IMM(BPF_REG_0
, 0),
1987 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1988 offsetof(struct __sk_buff
, cb
[4]) + 3),
1991 .errstr
= "misaligned context access",
1993 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1996 "check cb access: double",
1998 BPF_MOV64_IMM(BPF_REG_0
, 0),
1999 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
2000 offsetof(struct __sk_buff
, cb
[0])),
2001 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
2002 offsetof(struct __sk_buff
, cb
[2])),
2003 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
2004 offsetof(struct __sk_buff
, cb
[0])),
2005 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
2006 offsetof(struct __sk_buff
, cb
[2])),
2012 "check cb access: double, unaligned 1",
2014 BPF_MOV64_IMM(BPF_REG_0
, 0),
2015 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
2016 offsetof(struct __sk_buff
, cb
[1])),
2019 .errstr
= "misaligned context access",
2021 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
2024 "check cb access: double, unaligned 2",
2026 BPF_MOV64_IMM(BPF_REG_0
, 0),
2027 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
2028 offsetof(struct __sk_buff
, cb
[3])),
2031 .errstr
= "misaligned context access",
2033 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
2036 "check cb access: double, oob 1",
2038 BPF_MOV64_IMM(BPF_REG_0
, 0),
2039 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
2040 offsetof(struct __sk_buff
, cb
[4])),
2043 .errstr
= "invalid bpf_context access",
2047 "check cb access: double, oob 2",
2049 BPF_MOV64_IMM(BPF_REG_0
, 0),
2050 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
2051 offsetof(struct __sk_buff
, cb
[4])),
2054 .errstr
= "invalid bpf_context access",
2058 "check __sk_buff->ifindex dw store not permitted",
2060 BPF_MOV64_IMM(BPF_REG_0
, 0),
2061 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
2062 offsetof(struct __sk_buff
, ifindex
)),
2065 .errstr
= "invalid bpf_context access",
2069 "check __sk_buff->ifindex dw load not permitted",
2071 BPF_MOV64_IMM(BPF_REG_0
, 0),
2072 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
2073 offsetof(struct __sk_buff
, ifindex
)),
2076 .errstr
= "invalid bpf_context access",
2080 "check cb access: double, wrong type",
2082 BPF_MOV64_IMM(BPF_REG_0
, 0),
2083 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
2084 offsetof(struct __sk_buff
, cb
[0])),
2087 .errstr
= "invalid bpf_context access",
2089 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
2092 "check out of range skb->cb access",
2094 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
2095 offsetof(struct __sk_buff
, cb
[0]) + 256),
2098 .errstr
= "invalid bpf_context access",
2099 .errstr_unpriv
= "",
2101 .prog_type
= BPF_PROG_TYPE_SCHED_ACT
,
2104 "write skb fields from socket prog",
2106 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
2107 offsetof(struct __sk_buff
, cb
[4])),
2108 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
2109 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
2110 offsetof(struct __sk_buff
, mark
)),
2111 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
2112 offsetof(struct __sk_buff
, tc_index
)),
2113 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
2114 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
2115 offsetof(struct __sk_buff
, cb
[0])),
2116 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
2117 offsetof(struct __sk_buff
, cb
[2])),
2121 .errstr_unpriv
= "R1 leaks addr",
2122 .result_unpriv
= REJECT
,
2125 "write skb fields from tc_cls_act prog",
2127 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
2128 offsetof(struct __sk_buff
, cb
[0])),
2129 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
2130 offsetof(struct __sk_buff
, mark
)),
2131 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
2132 offsetof(struct __sk_buff
, tc_index
)),
2133 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
2134 offsetof(struct __sk_buff
, tc_index
)),
2135 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
2136 offsetof(struct __sk_buff
, cb
[3])),
2139 .errstr_unpriv
= "",
2140 .result_unpriv
= REJECT
,
2142 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2145 "PTR_TO_STACK store/load",
2147 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2148 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
2149 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
2150 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
2154 .retval
= 0xfaceb00c,
2157 "PTR_TO_STACK store/load - bad alignment on off",
2159 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2160 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
2161 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
2162 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
2166 .errstr
= "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2169 "PTR_TO_STACK store/load - bad alignment on reg",
2171 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2172 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
2173 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
2174 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
2178 .errstr
= "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2181 "PTR_TO_STACK store/load - out of bounds low",
2183 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2184 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -80000),
2185 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
2186 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
2190 .errstr
= "invalid stack off=-79992 size=8",
2193 "PTR_TO_STACK store/load - out of bounds high",
2195 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2196 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
2197 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
2198 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
2202 .errstr
= "invalid stack off=0 size=8",
2205 "unpriv: return pointer",
2207 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
2211 .result_unpriv
= REJECT
,
2212 .errstr_unpriv
= "R0 leaks addr",
2213 .retval
= POINTER_VALUE
,
2216 "unpriv: add const to pointer",
2218 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
2219 BPF_MOV64_IMM(BPF_REG_0
, 0),
2225 "unpriv: add pointer to pointer",
2227 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
2228 BPF_MOV64_IMM(BPF_REG_0
, 0),
2232 .errstr
= "R1 pointer += pointer",
2235 "unpriv: neg pointer",
2237 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_1
, 0),
2238 BPF_MOV64_IMM(BPF_REG_0
, 0),
2242 .result_unpriv
= REJECT
,
2243 .errstr_unpriv
= "R1 pointer arithmetic",
2246 "unpriv: cmp pointer with const",
2248 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
2249 BPF_MOV64_IMM(BPF_REG_0
, 0),
2253 .result_unpriv
= REJECT
,
2254 .errstr_unpriv
= "R1 pointer comparison",
2257 "unpriv: cmp pointer with pointer",
2259 BPF_JMP_REG(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
2260 BPF_MOV64_IMM(BPF_REG_0
, 0),
2264 .result_unpriv
= REJECT
,
2265 .errstr_unpriv
= "R10 pointer comparison",
2268 "unpriv: check that printk is disallowed",
2270 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
2271 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2272 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
2273 BPF_MOV64_IMM(BPF_REG_2
, 8),
2274 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
2275 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2276 BPF_FUNC_trace_printk
),
2277 BPF_MOV64_IMM(BPF_REG_0
, 0),
2280 .errstr_unpriv
= "unknown func bpf_trace_printk#6",
2281 .result_unpriv
= REJECT
,
2285 "unpriv: pass pointer to helper function",
2287 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
2288 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2289 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2290 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2291 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
2292 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
2293 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2294 BPF_FUNC_map_update_elem
),
2295 BPF_MOV64_IMM(BPF_REG_0
, 0),
2298 .fixup_map1
= { 3 },
2299 .errstr_unpriv
= "R4 leaks addr",
2300 .result_unpriv
= REJECT
,
2304 "unpriv: indirectly pass pointer on stack to helper function",
2306 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
2307 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2308 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2309 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2310 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2311 BPF_FUNC_map_lookup_elem
),
2312 BPF_MOV64_IMM(BPF_REG_0
, 0),
2315 .fixup_map1
= { 3 },
2316 .errstr
= "invalid indirect read from stack off -8+0 size 8",
2320 "unpriv: mangle pointer on stack 1",
2322 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
2323 BPF_ST_MEM(BPF_W
, BPF_REG_10
, -8, 0),
2324 BPF_MOV64_IMM(BPF_REG_0
, 0),
2327 .errstr_unpriv
= "attempt to corrupt spilled",
2328 .result_unpriv
= REJECT
,
2332 "unpriv: mangle pointer on stack 2",
2334 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
2335 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -1, 0),
2336 BPF_MOV64_IMM(BPF_REG_0
, 0),
2339 .errstr_unpriv
= "attempt to corrupt spilled",
2340 .result_unpriv
= REJECT
,
2344 "unpriv: read pointer from stack in small chunks",
2346 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
2347 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_10
, -8),
2348 BPF_MOV64_IMM(BPF_REG_0
, 0),
2351 .errstr
= "invalid size",
2355 "unpriv: write pointer into ctx",
2357 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
, 0),
2358 BPF_MOV64_IMM(BPF_REG_0
, 0),
2361 .errstr_unpriv
= "R1 leaks addr",
2362 .result_unpriv
= REJECT
,
2363 .errstr
= "invalid bpf_context access",
2367 "unpriv: spill/fill of ctx",
2369 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2370 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2371 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2372 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2373 BPF_MOV64_IMM(BPF_REG_0
, 0),
2379 "unpriv: spill/fill of ctx 2",
2381 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2382 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2383 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2384 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2385 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2386 BPF_FUNC_get_hash_recalc
),
2387 BPF_MOV64_IMM(BPF_REG_0
, 0),
2391 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2394 "unpriv: spill/fill of ctx 3",
2396 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2397 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2398 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2399 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2400 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2401 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2402 BPF_FUNC_get_hash_recalc
),
2406 .errstr
= "R1 type=fp expected=ctx",
2407 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2410 "unpriv: spill/fill of ctx 4",
2412 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2413 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2414 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2415 BPF_MOV64_IMM(BPF_REG_0
, 1),
2416 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_DW
, BPF_REG_10
,
2418 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2419 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2420 BPF_FUNC_get_hash_recalc
),
2424 .errstr
= "R1 type=inv expected=ctx",
2425 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2428 "unpriv: spill/fill of different pointers stx",
2430 BPF_MOV64_IMM(BPF_REG_3
, 42),
2431 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2432 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2433 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2434 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2435 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
2436 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2437 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2438 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2439 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2440 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
2441 offsetof(struct __sk_buff
, mark
)),
2442 BPF_MOV64_IMM(BPF_REG_0
, 0),
2446 .errstr
= "same insn cannot be used with different pointers",
2447 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2450 "unpriv: spill/fill of different pointers ldx",
2452 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2453 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2454 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2455 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2456 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
,
2457 -(__s32
)offsetof(struct bpf_perf_event_data
,
2458 sample_period
) - 8),
2459 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2460 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2461 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2462 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2463 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
,
2464 offsetof(struct bpf_perf_event_data
,
2466 BPF_MOV64_IMM(BPF_REG_0
, 0),
2470 .errstr
= "same insn cannot be used with different pointers",
2471 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
2474 "unpriv: write pointer into map elem value",
2476 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
2477 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2478 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2479 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2480 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2481 BPF_FUNC_map_lookup_elem
),
2482 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
2483 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
2486 .fixup_map1
= { 3 },
2487 .errstr_unpriv
= "R0 leaks addr",
2488 .result_unpriv
= REJECT
,
2492 "unpriv: partial copy of pointer",
2494 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_10
),
2495 BPF_MOV64_IMM(BPF_REG_0
, 0),
2498 .errstr_unpriv
= "R10 partial copy",
2499 .result_unpriv
= REJECT
,
2503 "unpriv: pass pointer to tail_call",
2505 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
2506 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2507 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2508 BPF_FUNC_tail_call
),
2509 BPF_MOV64_IMM(BPF_REG_0
, 0),
2512 .fixup_prog
= { 1 },
2513 .errstr_unpriv
= "R3 leaks addr into helper",
2514 .result_unpriv
= REJECT
,
2518 "unpriv: cmp map pointer with zero",
2520 BPF_MOV64_IMM(BPF_REG_1
, 0),
2521 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2522 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
2523 BPF_MOV64_IMM(BPF_REG_0
, 0),
2526 .fixup_map1
= { 1 },
2527 .errstr_unpriv
= "R1 pointer comparison",
2528 .result_unpriv
= REJECT
,
2532 "unpriv: write into frame pointer",
2534 BPF_MOV64_REG(BPF_REG_10
, BPF_REG_1
),
2535 BPF_MOV64_IMM(BPF_REG_0
, 0),
2538 .errstr
= "frame pointer is read only",
2542 "unpriv: spill/fill frame pointer",
2544 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2545 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2546 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2547 BPF_LDX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, 0),
2548 BPF_MOV64_IMM(BPF_REG_0
, 0),
2551 .errstr
= "frame pointer is read only",
2555 "unpriv: cmp of frame pointer",
2557 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_10
, 0, 0),
2558 BPF_MOV64_IMM(BPF_REG_0
, 0),
2561 .errstr_unpriv
= "R10 pointer comparison",
2562 .result_unpriv
= REJECT
,
2566 "unpriv: adding of fp",
2568 BPF_MOV64_IMM(BPF_REG_0
, 0),
2569 BPF_MOV64_IMM(BPF_REG_1
, 0),
2570 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
2571 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, -8),
2577 "unpriv: cmp of stack pointer",
2579 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2580 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2581 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_2
, 0, 0),
2582 BPF_MOV64_IMM(BPF_REG_0
, 0),
2585 .errstr_unpriv
= "R2 pointer comparison",
2586 .result_unpriv
= REJECT
,
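	/*
	 * runtime/jit tail_call tests: the verifier does not reject a
	 * negative or greater-than-32-bit index passed to bpf_tail_call();
	 * the bounds check happens at run time, so both programs below are
	 * accepted at load time.
	 */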
2590 "runtime/jit: pass negative index to tail_call",
2592 BPF_MOV64_IMM(BPF_REG_3
, -1),
2593 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2594 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2595 BPF_FUNC_tail_call
),
2596 BPF_MOV64_IMM(BPF_REG_0
, 0),
2599 .fixup_prog
= { 1 },
2603 "runtime/jit: pass > 32bit index to tail_call",
2605 BPF_LD_IMM64(BPF_REG_3
, 0x100000000ULL
),
2606 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2607 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2608 BPF_FUNC_tail_call
),
2609 BPF_MOV64_IMM(BPF_REG_0
, 0),
2612 .fixup_prog
= { 2 },
2616 "stack pointer arithmetic",
2618 BPF_MOV64_IMM(BPF_REG_1
, 4),
2619 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
2620 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_10
),
2621 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2622 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2623 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2624 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_1
),
2625 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2626 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2627 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2628 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2629 BPF_MOV64_IMM(BPF_REG_0
, 0),
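	/*
	 * raw_stack tests: bpf_skb_load_bytes() writing into stack memory.
	 * They cover a missing helper call, negative/zero/oversized lengths,
	 * uninitialized vs. initialized stack slots, and spilled registers
	 * in and around the written range.
	 */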
2635 "raw_stack: no skb_load_bytes",
2637 BPF_MOV64_IMM(BPF_REG_2
, 4),
2638 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2639 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2640 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2641 BPF_MOV64_IMM(BPF_REG_4
, 8),
2642 /* Call to skb_load_bytes() omitted. */
2643 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2647 .errstr
= "invalid read from stack off -8+0 size 8",
2648 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2651 "raw_stack: skb_load_bytes, negative len",
2653 BPF_MOV64_IMM(BPF_REG_2
, 4),
2654 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2655 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2656 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2657 BPF_MOV64_IMM(BPF_REG_4
, -8),
2658 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2659 BPF_FUNC_skb_load_bytes
),
2660 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2664 .errstr
= "R4 min value is negative",
2665 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2668 "raw_stack: skb_load_bytes, negative len 2",
2670 BPF_MOV64_IMM(BPF_REG_2
, 4),
2671 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2672 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2673 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2674 BPF_MOV64_IMM(BPF_REG_4
, ~0),
2675 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2676 BPF_FUNC_skb_load_bytes
),
2677 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2681 .errstr
= "R4 min value is negative",
2682 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2685 "raw_stack: skb_load_bytes, zero len",
2687 BPF_MOV64_IMM(BPF_REG_2
, 4),
2688 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2689 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2690 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2691 BPF_MOV64_IMM(BPF_REG_4
, 0),
2692 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2693 BPF_FUNC_skb_load_bytes
),
2694 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2698 .errstr
= "invalid stack type R3",
2699 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2702 "raw_stack: skb_load_bytes, no init",
2704 BPF_MOV64_IMM(BPF_REG_2
, 4),
2705 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2706 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2707 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2708 BPF_MOV64_IMM(BPF_REG_4
, 8),
2709 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2710 BPF_FUNC_skb_load_bytes
),
2711 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2715 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2718 "raw_stack: skb_load_bytes, init",
2720 BPF_MOV64_IMM(BPF_REG_2
, 4),
2721 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2722 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2723 BPF_ST_MEM(BPF_DW
, BPF_REG_6
, 0, 0xcafe),
2724 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2725 BPF_MOV64_IMM(BPF_REG_4
, 8),
2726 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2727 BPF_FUNC_skb_load_bytes
),
2728 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2732 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2735 "raw_stack: skb_load_bytes, spilled regs around bounds",
2737 BPF_MOV64_IMM(BPF_REG_2
, 4),
2738 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2739 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2740 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2741 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2742 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2743 BPF_MOV64_IMM(BPF_REG_4
, 8),
2744 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2745 BPF_FUNC_skb_load_bytes
),
2746 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2747 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2748 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2749 offsetof(struct __sk_buff
, mark
)),
2750 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2751 offsetof(struct __sk_buff
, priority
)),
2752 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2756 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2759 "raw_stack: skb_load_bytes, spilled regs corruption",
2761 BPF_MOV64_IMM(BPF_REG_2
, 4),
2762 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2763 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2764 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2765 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2766 BPF_MOV64_IMM(BPF_REG_4
, 8),
2767 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2768 BPF_FUNC_skb_load_bytes
),
2769 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2770 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2771 offsetof(struct __sk_buff
, mark
)),
2775 .errstr
= "R0 invalid mem access 'inv'",
2776 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2779 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2781 BPF_MOV64_IMM(BPF_REG_2
, 4),
2782 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2783 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2784 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2785 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2786 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2787 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2788 BPF_MOV64_IMM(BPF_REG_4
, 8),
2789 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2790 BPF_FUNC_skb_load_bytes
),
2791 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2792 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2793 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2794 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2795 offsetof(struct __sk_buff
, mark
)),
2796 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2797 offsetof(struct __sk_buff
, priority
)),
2798 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2799 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_3
,
2800 offsetof(struct __sk_buff
, pkt_type
)),
2801 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2805 .errstr
= "R3 invalid mem access 'inv'",
2806 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2809 "raw_stack: skb_load_bytes, spilled regs + data",
2811 BPF_MOV64_IMM(BPF_REG_2
, 4),
2812 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2813 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2814 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2815 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2816 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2817 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2818 BPF_MOV64_IMM(BPF_REG_4
, 8),
2819 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2820 BPF_FUNC_skb_load_bytes
),
2821 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2822 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2823 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2824 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2825 offsetof(struct __sk_buff
, mark
)),
2826 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2827 offsetof(struct __sk_buff
, priority
)),
2828 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2829 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2833 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2836 "raw_stack: skb_load_bytes, invalid access 1",
2838 BPF_MOV64_IMM(BPF_REG_2
, 4),
2839 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2840 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -513),
2841 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2842 BPF_MOV64_IMM(BPF_REG_4
, 8),
2843 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2844 BPF_FUNC_skb_load_bytes
),
2845 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2849 .errstr
= "invalid stack type R3 off=-513 access_size=8",
2850 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2853 "raw_stack: skb_load_bytes, invalid access 2",
2855 BPF_MOV64_IMM(BPF_REG_2
, 4),
2856 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2857 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2858 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2859 BPF_MOV64_IMM(BPF_REG_4
, 8),
2860 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2861 BPF_FUNC_skb_load_bytes
),
2862 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2866 .errstr
= "invalid stack type R3 off=-1 access_size=8",
2867 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2870 "raw_stack: skb_load_bytes, invalid access 3",
2872 BPF_MOV64_IMM(BPF_REG_2
, 4),
2873 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2874 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 0xffffffff),
2875 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2876 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2877 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2878 BPF_FUNC_skb_load_bytes
),
2879 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2883 .errstr
= "R4 min value is negative",
2884 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2887 "raw_stack: skb_load_bytes, invalid access 4",
2889 BPF_MOV64_IMM(BPF_REG_2
, 4),
2890 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2891 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2892 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2893 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2894 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2895 BPF_FUNC_skb_load_bytes
),
2896 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2900 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2901 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2904 "raw_stack: skb_load_bytes, invalid access 5",
2906 BPF_MOV64_IMM(BPF_REG_2
, 4),
2907 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2908 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2909 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2910 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2911 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2912 BPF_FUNC_skb_load_bytes
),
2913 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2917 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2918 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2921 "raw_stack: skb_load_bytes, invalid access 6",
2923 BPF_MOV64_IMM(BPF_REG_2
, 4),
2924 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2925 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2926 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2927 BPF_MOV64_IMM(BPF_REG_4
, 0),
2928 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2929 BPF_FUNC_skb_load_bytes
),
2930 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2934 .errstr
= "invalid stack type R3 off=-512 access_size=0",
2935 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2938 "raw_stack: skb_load_bytes, large access",
2940 BPF_MOV64_IMM(BPF_REG_2
, 4),
2941 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2942 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2943 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2944 BPF_MOV64_IMM(BPF_REG_4
, 512),
2945 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2946 BPF_FUNC_skb_load_bytes
),
2947 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2951 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
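	/*
	 * Direct BPF_ST and BPF_XADD stores into the context register (R1)
	 * are not allowed; the two tests below expect the corresponding
	 * verifier errors.
	 */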
2954 "context stores via ST",
2956 BPF_MOV64_IMM(BPF_REG_0
, 0),
2957 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, offsetof(struct __sk_buff
, mark
), 0),
2960 .errstr
= "BPF_ST stores into R1 context is not allowed",
2962 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2965 "context stores via XADD",
2967 BPF_MOV64_IMM(BPF_REG_0
, 0),
2968 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_W
, BPF_REG_1
,
2969 BPF_REG_0
, offsetof(struct __sk_buff
, mark
), 0),
2972 .errstr
= "BPF_XADD stores into R1 context is not allowed",
2974 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
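	/*
	 * direct packet access tests: load skb->data and skb->data_end and
	 * verify that packet bytes may only be read or written after the
	 * access has been bounds checked against data_end.
	 */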
2977 "direct packet access: test1",
2979 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2980 offsetof(struct __sk_buff
, data
)),
2981 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2982 offsetof(struct __sk_buff
, data_end
)),
2983 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2984 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2985 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2986 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2987 BPF_MOV64_IMM(BPF_REG_0
, 0),
2991 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2994 "direct packet access: test2",
2996 BPF_MOV64_IMM(BPF_REG_0
, 1),
2997 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
2998 offsetof(struct __sk_buff
, data_end
)),
2999 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3000 offsetof(struct __sk_buff
, data
)),
3001 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
3002 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 14),
3003 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_4
, 15),
3004 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 7),
3005 BPF_LDX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_3
, 12),
3006 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_4
, 14),
3007 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3008 offsetof(struct __sk_buff
, data
)),
3009 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_4
),
3010 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3011 offsetof(struct __sk_buff
, len
)),
3012 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 49),
3013 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 49),
3014 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_2
),
3015 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_3
),
3016 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
3017 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
3018 offsetof(struct __sk_buff
, data_end
)),
3019 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
3020 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_3
, 4),
3021 BPF_MOV64_IMM(BPF_REG_0
, 0),
3025 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3028 "direct packet access: test3",
3030 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3031 offsetof(struct __sk_buff
, data
)),
3032 BPF_MOV64_IMM(BPF_REG_0
, 0),
3035 .errstr
= "invalid bpf_context access off=76",
3037 .prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
,
3040 "direct packet access: test4 (write)",
3042 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3043 offsetof(struct __sk_buff
, data
)),
3044 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3045 offsetof(struct __sk_buff
, data_end
)),
3046 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3047 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3048 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3049 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3050 BPF_MOV64_IMM(BPF_REG_0
, 0),
3054 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3057 "direct packet access: test5 (pkt_end >= reg, good access)",
3059 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3060 offsetof(struct __sk_buff
, data
)),
3061 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3062 offsetof(struct __sk_buff
, data_end
)),
3063 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3064 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3065 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
3066 BPF_MOV64_IMM(BPF_REG_0
, 1),
3068 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3069 BPF_MOV64_IMM(BPF_REG_0
, 0),
3073 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3076 "direct packet access: test6 (pkt_end >= reg, bad access)",
3078 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3079 offsetof(struct __sk_buff
, data
)),
3080 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3081 offsetof(struct __sk_buff
, data_end
)),
3082 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3083 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3084 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
3085 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3086 BPF_MOV64_IMM(BPF_REG_0
, 1),
3088 BPF_MOV64_IMM(BPF_REG_0
, 0),
3091 .errstr
= "invalid access to packet",
3093 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3096 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3098 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3099 offsetof(struct __sk_buff
, data
)),
3100 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3101 offsetof(struct __sk_buff
, data_end
)),
3102 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3103 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3104 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
3105 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3106 BPF_MOV64_IMM(BPF_REG_0
, 1),
3108 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3109 BPF_MOV64_IMM(BPF_REG_0
, 0),
3112 .errstr
= "invalid access to packet",
3114 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3117 "direct packet access: test8 (double test, variant 1)",
3119 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3120 offsetof(struct __sk_buff
, data
)),
3121 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3122 offsetof(struct __sk_buff
, data_end
)),
3123 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3124 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3125 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 4),
3126 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3127 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3128 BPF_MOV64_IMM(BPF_REG_0
, 1),
3130 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3131 BPF_MOV64_IMM(BPF_REG_0
, 0),
3135 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3138 "direct packet access: test9 (double test, variant 2)",
3140 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3141 offsetof(struct __sk_buff
, data
)),
3142 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3143 offsetof(struct __sk_buff
, data_end
)),
3144 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3145 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3146 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
3147 BPF_MOV64_IMM(BPF_REG_0
, 1),
3149 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3150 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3151 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3152 BPF_MOV64_IMM(BPF_REG_0
, 0),
3156 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3159 "direct packet access: test10 (write invalid)",
3161 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3162 offsetof(struct __sk_buff
, data
)),
3163 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3164 offsetof(struct __sk_buff
, data_end
)),
3165 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3166 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3167 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
3168 BPF_MOV64_IMM(BPF_REG_0
, 0),
3170 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3171 BPF_MOV64_IMM(BPF_REG_0
, 0),
3174 .errstr
= "invalid access to packet",
3176 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3179 "direct packet access: test11 (shift, good access)",
3181 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3182 offsetof(struct __sk_buff
, data
)),
3183 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3184 offsetof(struct __sk_buff
, data_end
)),
3185 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3186 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3187 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
3188 BPF_MOV64_IMM(BPF_REG_3
, 144),
3189 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
3190 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
3191 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 3),
3192 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3193 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3194 BPF_MOV64_IMM(BPF_REG_0
, 1),
3196 BPF_MOV64_IMM(BPF_REG_0
, 0),
3200 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3204 "direct packet access: test12 (and, good access)",
3206 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3207 offsetof(struct __sk_buff
, data
)),
3208 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3209 offsetof(struct __sk_buff
, data_end
)),
3210 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3211 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3212 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
3213 BPF_MOV64_IMM(BPF_REG_3
, 144),
3214 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
3215 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
3216 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
3217 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3218 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3219 BPF_MOV64_IMM(BPF_REG_0
, 1),
3221 BPF_MOV64_IMM(BPF_REG_0
, 0),
3225 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3229 "direct packet access: test13 (branches, good access)",
3231 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3232 offsetof(struct __sk_buff
, data
)),
3233 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3234 offsetof(struct __sk_buff
, data_end
)),
3235 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3236 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3237 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 13),
3238 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3239 offsetof(struct __sk_buff
, mark
)),
3240 BPF_MOV64_IMM(BPF_REG_4
, 1),
3241 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_4
, 2),
3242 BPF_MOV64_IMM(BPF_REG_3
, 14),
3243 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
3244 BPF_MOV64_IMM(BPF_REG_3
, 24),
3245 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
3246 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
3247 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
3248 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3249 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3250 BPF_MOV64_IMM(BPF_REG_0
, 1),
3252 BPF_MOV64_IMM(BPF_REG_0
, 0),
3256 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3260 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3262 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3263 offsetof(struct __sk_buff
, data
)),
3264 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3265 offsetof(struct __sk_buff
, data_end
)),
3266 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3267 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3268 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 7),
3269 BPF_MOV64_IMM(BPF_REG_5
, 12),
3270 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 4),
3271 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3272 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3273 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_6
, 0),
3274 BPF_MOV64_IMM(BPF_REG_0
, 1),
3276 BPF_MOV64_IMM(BPF_REG_0
, 0),
3280 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3284 "direct packet access: test15 (spill with xadd)",
3286 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3287 offsetof(struct __sk_buff
, data
)),
3288 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3289 offsetof(struct __sk_buff
, data_end
)),
3290 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3291 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3292 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
3293 BPF_MOV64_IMM(BPF_REG_5
, 4096),
3294 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
3295 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
3296 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
3297 BPF_STX_XADD(BPF_DW
, BPF_REG_4
, BPF_REG_5
, 0),
3298 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
3299 BPF_STX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_5
, 0),
3300 BPF_MOV64_IMM(BPF_REG_0
, 0),
3303 .errstr
= "R2 invalid mem access 'inv'",
3305 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3308 "direct packet access: test16 (arith on data_end)",
3310 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3311 offsetof(struct __sk_buff
, data
)),
3312 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3313 offsetof(struct __sk_buff
, data_end
)),
3314 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3315 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3316 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 16),
3317 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3318 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3319 BPF_MOV64_IMM(BPF_REG_0
, 0),
3322 .errstr
= "R3 pointer arithmetic on PTR_TO_PACKET_END",
3324 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3327 "direct packet access: test17 (pruning, alignment)",
3329 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3330 offsetof(struct __sk_buff
, data
)),
3331 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3332 offsetof(struct __sk_buff
, data_end
)),
3333 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3334 offsetof(struct __sk_buff
, mark
)),
3335 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3336 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 14),
3337 BPF_JMP_IMM(BPF_JGT
, BPF_REG_7
, 1, 4),
3338 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3339 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
, -4),
3340 BPF_MOV64_IMM(BPF_REG_0
, 0),
3342 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 1),
3345 .errstr
= "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3347 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3348 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
3351 "direct packet access: test18 (imm += pkt_ptr, 1)",
3353 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3354 offsetof(struct __sk_buff
, data
)),
3355 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3356 offsetof(struct __sk_buff
, data_end
)),
3357 BPF_MOV64_IMM(BPF_REG_0
, 8),
3358 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3359 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3360 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3361 BPF_MOV64_IMM(BPF_REG_0
, 0),
3365 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3368 "direct packet access: test19 (imm += pkt_ptr, 2)",
3370 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3371 offsetof(struct __sk_buff
, data
)),
3372 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3373 offsetof(struct __sk_buff
, data_end
)),
3374 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3375 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3376 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
3377 BPF_MOV64_IMM(BPF_REG_4
, 4),
3378 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3379 BPF_STX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_4
, 0),
3380 BPF_MOV64_IMM(BPF_REG_0
, 0),
3384 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3387 "direct packet access: test20 (x += pkt_ptr, 1)",
3389 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3390 offsetof(struct __sk_buff
, data
)),
3391 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3392 offsetof(struct __sk_buff
, data_end
)),
3393 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3394 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3395 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3396 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0x7fff),
3397 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3398 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3399 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3400 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
3401 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
3402 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
3403 BPF_MOV64_IMM(BPF_REG_0
, 0),
3406 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3410 "direct packet access: test21 (x += pkt_ptr, 2)",
3412 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3413 offsetof(struct __sk_buff
, data
)),
3414 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3415 offsetof(struct __sk_buff
, data_end
)),
3416 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3417 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3418 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 9),
3419 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
3420 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
3421 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
3422 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, 0x7fff),
3423 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3424 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3425 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
3426 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
3427 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
3428 BPF_MOV64_IMM(BPF_REG_0
, 0),
3431 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3435 "direct packet access: test22 (x += pkt_ptr, 3)",
3437 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3438 offsetof(struct __sk_buff
, data
)),
3439 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3440 offsetof(struct __sk_buff
, data_end
)),
3441 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3442 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3443 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -8),
3444 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_3
, -16),
3445 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_10
, -16),
3446 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 11),
3447 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
3448 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
3449 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
3450 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
3451 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_4
, 49),
3452 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3453 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
3454 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
3455 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
3456 BPF_MOV64_IMM(BPF_REG_2
, 1),
3457 BPF_STX_MEM(BPF_H
, BPF_REG_4
, BPF_REG_2
, 0),
3458 BPF_MOV64_IMM(BPF_REG_0
, 0),
3461 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3465 "direct packet access: test23 (x += pkt_ptr, 4)",
3467 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3468 offsetof(struct __sk_buff
, data
)),
3469 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3470 offsetof(struct __sk_buff
, data_end
)),
3471 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3472 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3473 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3474 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xffff),
3475 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3476 BPF_MOV64_IMM(BPF_REG_0
, 31),
3477 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3478 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3479 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3480 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0xffff - 1),
3481 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3482 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3483 BPF_MOV64_IMM(BPF_REG_0
, 0),
3486 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3488 .errstr
= "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3491 "direct packet access: test24 (x += pkt_ptr, 5)",
3493 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3494 offsetof(struct __sk_buff
, data
)),
3495 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3496 offsetof(struct __sk_buff
, data_end
)),
3497 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3498 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3499 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3500 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xff),
3501 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3502 BPF_MOV64_IMM(BPF_REG_0
, 64),
3503 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3504 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3505 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3506 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7fff - 1),
3507 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3508 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3509 BPF_MOV64_IMM(BPF_REG_0
, 0),
3512 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3516 "direct packet access: test25 (marking on <, good access)",
3518 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3519 offsetof(struct __sk_buff
, data
)),
3520 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3521 offsetof(struct __sk_buff
, data_end
)),
3522 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3523 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3524 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 2),
3525 BPF_MOV64_IMM(BPF_REG_0
, 0),
3527 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3528 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3531 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3534 "direct packet access: test26 (marking on <, bad access)",
3536 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3537 offsetof(struct __sk_buff
, data
)),
3538 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3539 offsetof(struct __sk_buff
, data_end
)),
3540 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3541 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3542 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 3),
3543 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3544 BPF_MOV64_IMM(BPF_REG_0
, 0),
3546 BPF_JMP_IMM(BPF_JA
, 0, 0, -3),
3549 .errstr
= "invalid access to packet",
3550 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3553 "direct packet access: test27 (marking on <=, good access)",
3555 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3556 offsetof(struct __sk_buff
, data
)),
3557 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3558 offsetof(struct __sk_buff
, data_end
)),
3559 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3560 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3561 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 1),
3562 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3563 BPF_MOV64_IMM(BPF_REG_0
, 1),
3567 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3571 "direct packet access: test28 (marking on <=, bad access)",
3573 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3574 offsetof(struct __sk_buff
, data
)),
3575 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3576 offsetof(struct __sk_buff
, data_end
)),
3577 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3578 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3579 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 2),
3580 BPF_MOV64_IMM(BPF_REG_0
, 1),
3582 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3583 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3586 .errstr
= "invalid access to packet",
3587 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
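	/*
	 * helper access to packet: packet pointers may only be passed to
	 * helpers together with a length that keeps the access inside
	 * [data, data_end); unsuitable helpers and unchecked or bad ranges
	 * are rejected with the errors listed below.
	 */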
3590 "helper access to packet: test1, valid packet_ptr range",
3592 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3593 offsetof(struct xdp_md
, data
)),
3594 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3595 offsetof(struct xdp_md
, data_end
)),
3596 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3597 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3598 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3599 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3600 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3601 BPF_MOV64_IMM(BPF_REG_4
, 0),
3602 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3603 BPF_FUNC_map_update_elem
),
3604 BPF_MOV64_IMM(BPF_REG_0
, 0),
3607 .fixup_map1
= { 5 },
3608 .result_unpriv
= ACCEPT
,
3610 .prog_type
= BPF_PROG_TYPE_XDP
,
3613 "helper access to packet: test2, unchecked packet_ptr",
3615 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3616 offsetof(struct xdp_md
, data
)),
3617 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3618 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3619 BPF_FUNC_map_lookup_elem
),
3620 BPF_MOV64_IMM(BPF_REG_0
, 0),
3623 .fixup_map1
= { 1 },
3625 .errstr
= "invalid access to packet",
3626 .prog_type
= BPF_PROG_TYPE_XDP
,
3629 "helper access to packet: test3, variable add",
3631 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3632 offsetof(struct xdp_md
, data
)),
3633 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3634 offsetof(struct xdp_md
, data_end
)),
3635 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3636 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3637 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3638 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3639 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3640 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3641 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3642 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3643 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3644 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3645 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3646 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3647 BPF_FUNC_map_lookup_elem
),
3648 BPF_MOV64_IMM(BPF_REG_0
, 0),
3651 .fixup_map1
= { 11 },
3653 .prog_type
= BPF_PROG_TYPE_XDP
,
3656 "helper access to packet: test4, packet_ptr with bad range",
3658 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3659 offsetof(struct xdp_md
, data
)),
3660 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3661 offsetof(struct xdp_md
, data_end
)),
3662 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3663 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3664 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3665 BPF_MOV64_IMM(BPF_REG_0
, 0),
3667 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3668 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3669 BPF_FUNC_map_lookup_elem
),
3670 BPF_MOV64_IMM(BPF_REG_0
, 0),
3673 .fixup_map1
= { 7 },
3675 .errstr
= "invalid access to packet",
3676 .prog_type
= BPF_PROG_TYPE_XDP
,
3679 "helper access to packet: test5, packet_ptr with too short range",
3681 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3682 offsetof(struct xdp_md
, data
)),
3683 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3684 offsetof(struct xdp_md
, data_end
)),
3685 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3686 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3687 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3688 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3689 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3690 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3691 BPF_FUNC_map_lookup_elem
),
3692 BPF_MOV64_IMM(BPF_REG_0
, 0),
3695 .fixup_map1
= { 6 },
3697 .errstr
= "invalid access to packet",
3698 .prog_type
= BPF_PROG_TYPE_XDP
,
3701 "helper access to packet: test6, cls valid packet_ptr range",
3703 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3704 offsetof(struct __sk_buff
, data
)),
3705 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3706 offsetof(struct __sk_buff
, data_end
)),
3707 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3708 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3709 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3710 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3711 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3712 BPF_MOV64_IMM(BPF_REG_4
, 0),
3713 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3714 BPF_FUNC_map_update_elem
),
3715 BPF_MOV64_IMM(BPF_REG_0
, 0),
3718 .fixup_map1
= { 5 },
3720 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3723 "helper access to packet: test7, cls unchecked packet_ptr",
3725 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3726 offsetof(struct __sk_buff
, data
)),
3727 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3728 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3729 BPF_FUNC_map_lookup_elem
),
3730 BPF_MOV64_IMM(BPF_REG_0
, 0),
3733 .fixup_map1
= { 1 },
3735 .errstr
= "invalid access to packet",
3736 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3739 "helper access to packet: test8, cls variable add",
3741 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3742 offsetof(struct __sk_buff
, data
)),
3743 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3744 offsetof(struct __sk_buff
, data_end
)),
3745 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3746 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3747 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3748 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3749 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3750 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3751 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3752 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3753 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3754 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3755 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3756 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3757 BPF_FUNC_map_lookup_elem
),
3758 BPF_MOV64_IMM(BPF_REG_0
, 0),
3761 .fixup_map1
= { 11 },
3763 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3766 "helper access to packet: test9, cls packet_ptr with bad range",
3768 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3769 offsetof(struct __sk_buff
, data
)),
3770 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3771 offsetof(struct __sk_buff
, data_end
)),
3772 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3773 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3774 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3775 BPF_MOV64_IMM(BPF_REG_0
, 0),
3777 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3778 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3779 BPF_FUNC_map_lookup_elem
),
3780 BPF_MOV64_IMM(BPF_REG_0
, 0),
3783 .fixup_map1
= { 7 },
3785 .errstr
= "invalid access to packet",
3786 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3789 "helper access to packet: test10, cls packet_ptr with too short range",
3791 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3792 offsetof(struct __sk_buff
, data
)),
3793 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3794 offsetof(struct __sk_buff
, data_end
)),
3795 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3796 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3797 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3798 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3799 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3800 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3801 BPF_FUNC_map_lookup_elem
),
3802 BPF_MOV64_IMM(BPF_REG_0
, 0),
3805 .fixup_map1
= { 6 },
3807 .errstr
= "invalid access to packet",
3808 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3811 "helper access to packet: test11, cls unsuitable helper 1",
3813 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3814 offsetof(struct __sk_buff
, data
)),
3815 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3816 offsetof(struct __sk_buff
, data_end
)),
3817 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3818 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3819 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 7),
3820 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_7
, 4),
3821 BPF_MOV64_IMM(BPF_REG_2
, 0),
3822 BPF_MOV64_IMM(BPF_REG_4
, 42),
3823 BPF_MOV64_IMM(BPF_REG_5
, 0),
3824 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3825 BPF_FUNC_skb_store_bytes
),
3826 BPF_MOV64_IMM(BPF_REG_0
, 0),
3830 .errstr
= "helper access to the packet",
3831 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3834 "helper access to packet: test12, cls unsuitable helper 2",
3836 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3837 offsetof(struct __sk_buff
, data
)),
3838 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3839 offsetof(struct __sk_buff
, data_end
)),
3840 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3841 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
3842 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_7
, 3),
3843 BPF_MOV64_IMM(BPF_REG_2
, 0),
3844 BPF_MOV64_IMM(BPF_REG_4
, 4),
3845 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3846 BPF_FUNC_skb_load_bytes
),
3847 BPF_MOV64_IMM(BPF_REG_0
, 0),
3851 .errstr
= "helper access to the packet",
3852 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3855 "helper access to packet: test13, cls helper ok",
3857 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3858 offsetof(struct __sk_buff
, data
)),
3859 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3860 offsetof(struct __sk_buff
, data_end
)),
3861 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3862 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3863 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3864 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3865 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3866 BPF_MOV64_IMM(BPF_REG_2
, 4),
3867 BPF_MOV64_IMM(BPF_REG_3
, 0),
3868 BPF_MOV64_IMM(BPF_REG_4
, 0),
3869 BPF_MOV64_IMM(BPF_REG_5
, 0),
3870 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3871 BPF_FUNC_csum_diff
),
3872 BPF_MOV64_IMM(BPF_REG_0
, 0),
3876 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3879 "helper access to packet: test14, cls helper ok sub",
3881 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3882 offsetof(struct __sk_buff
, data
)),
3883 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3884 offsetof(struct __sk_buff
, data_end
)),
3885 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3886 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3887 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3888 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3889 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 4),
3890 BPF_MOV64_IMM(BPF_REG_2
, 4),
3891 BPF_MOV64_IMM(BPF_REG_3
, 0),
3892 BPF_MOV64_IMM(BPF_REG_4
, 0),
3893 BPF_MOV64_IMM(BPF_REG_5
, 0),
3894 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3895 BPF_FUNC_csum_diff
),
3896 BPF_MOV64_IMM(BPF_REG_0
, 0),
3900 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3903 "helper access to packet: test15, cls helper fail sub",
3905 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3906 offsetof(struct __sk_buff
, data
)),
3907 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3908 offsetof(struct __sk_buff
, data_end
)),
3909 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3910 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3911 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3912 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3913 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 12),
3914 BPF_MOV64_IMM(BPF_REG_2
, 4),
3915 BPF_MOV64_IMM(BPF_REG_3
, 0),
3916 BPF_MOV64_IMM(BPF_REG_4
, 0),
3917 BPF_MOV64_IMM(BPF_REG_5
, 0),
3918 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3919 BPF_FUNC_csum_diff
),
3920 BPF_MOV64_IMM(BPF_REG_0
, 0),
3924 .errstr
= "invalid access to packet",
3925 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3928 "helper access to packet: test16, cls helper fail range 1",
3930 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3931 offsetof(struct __sk_buff
, data
)),
3932 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3933 offsetof(struct __sk_buff
, data_end
)),
3934 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3935 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3936 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3937 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3938 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3939 BPF_MOV64_IMM(BPF_REG_2
, 8),
3940 BPF_MOV64_IMM(BPF_REG_3
, 0),
3941 BPF_MOV64_IMM(BPF_REG_4
, 0),
3942 BPF_MOV64_IMM(BPF_REG_5
, 0),
3943 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3944 BPF_FUNC_csum_diff
),
3945 BPF_MOV64_IMM(BPF_REG_0
, 0),
3949 .errstr
= "invalid access to packet",
3950 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3953 "helper access to packet: test17, cls helper fail range 2",
3955 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3956 offsetof(struct __sk_buff
, data
)),
3957 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3958 offsetof(struct __sk_buff
, data_end
)),
3959 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3960 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3961 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3962 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3963 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3964 BPF_MOV64_IMM(BPF_REG_2
, -9),
3965 BPF_MOV64_IMM(BPF_REG_3
, 0),
3966 BPF_MOV64_IMM(BPF_REG_4
, 0),
3967 BPF_MOV64_IMM(BPF_REG_5
, 0),
3968 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3969 BPF_FUNC_csum_diff
),
3970 BPF_MOV64_IMM(BPF_REG_0
, 0),
3974 .errstr
= "R2 min value is negative",
3975 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3978 "helper access to packet: test18, cls helper fail range 3",
3980 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3981 offsetof(struct __sk_buff
, data
)),
3982 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3983 offsetof(struct __sk_buff
, data_end
)),
3984 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3985 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3986 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3987 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3988 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3989 BPF_MOV64_IMM(BPF_REG_2
, ~0),
3990 BPF_MOV64_IMM(BPF_REG_3
, 0),
3991 BPF_MOV64_IMM(BPF_REG_4
, 0),
3992 BPF_MOV64_IMM(BPF_REG_5
, 0),
3993 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3994 BPF_FUNC_csum_diff
),
3995 BPF_MOV64_IMM(BPF_REG_0
, 0),
3999 .errstr
= "R2 min value is negative",
4000 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
4003 "helper access to packet: test19, cls helper range zero",
4005 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
4006 offsetof(struct __sk_buff
, data
)),
4007 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
4008 offsetof(struct __sk_buff
, data_end
)),
4009 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
4010 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
4011 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
4012 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
4013 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
4014 BPF_MOV64_IMM(BPF_REG_2
, 0),
4015 BPF_MOV64_IMM(BPF_REG_3
, 0),
4016 BPF_MOV64_IMM(BPF_REG_4
, 0),
4017 BPF_MOV64_IMM(BPF_REG_5
, 0),
4018 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4019 BPF_FUNC_csum_diff
),
4020 BPF_MOV64_IMM(BPF_REG_0
, 0),
4024 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
4027 "helper access to packet: test20, pkt end as input",
4029 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
4030 offsetof(struct __sk_buff
, data
)),
4031 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
4032 offsetof(struct __sk_buff
, data_end
)),
4033 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
4034 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
4035 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
4036 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
4037 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
4038 BPF_MOV64_IMM(BPF_REG_2
, 4),
4039 BPF_MOV64_IMM(BPF_REG_3
, 0),
4040 BPF_MOV64_IMM(BPF_REG_4
, 0),
4041 BPF_MOV64_IMM(BPF_REG_5
, 0),
4042 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4043 BPF_FUNC_csum_diff
),
4044 BPF_MOV64_IMM(BPF_REG_0
, 0),
4048 .errstr
= "R1 type=pkt_end expected=fp",
4049 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
4052 "helper access to packet: test21, wrong reg",
4054 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
4055 offsetof(struct __sk_buff
, data
)),
4056 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
4057 offsetof(struct __sk_buff
, data_end
)),
4058 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
4059 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
4060 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
4061 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
4062 BPF_MOV64_IMM(BPF_REG_2
, 4),
4063 BPF_MOV64_IMM(BPF_REG_3
, 0),
4064 BPF_MOV64_IMM(BPF_REG_4
, 0),
4065 BPF_MOV64_IMM(BPF_REG_5
, 0),
4066 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4067 BPF_FUNC_csum_diff
),
4068 BPF_MOV64_IMM(BPF_REG_0
, 0),
4072 .errstr
= "invalid access to packet",
4073 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
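	/*
	 * Map element access: offsets into the value returned by
	 * bpf_map_lookup_elem() must be bounds checked against the value
	 * size; the tests below use constant, register and variable offsets,
	 * both valid and invalid.
	 */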
4076 "valid map access into an array with a constant",
4078 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4079 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4080 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4081 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4082 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4083 BPF_FUNC_map_lookup_elem
),
4084 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4085 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4086 offsetof(struct test_val
, foo
)),
4089 .fixup_map2
= { 3 },
4090 .errstr_unpriv
= "R0 leaks addr",
4091 .result_unpriv
= REJECT
,
4095 "valid map access into an array with a register",
4097 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4098 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4099 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4100 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4101 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4102 BPF_FUNC_map_lookup_elem
),
4103 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4104 BPF_MOV64_IMM(BPF_REG_1
, 4),
4105 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4106 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4107 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4108 offsetof(struct test_val
, foo
)),
4111 .fixup_map2
= { 3 },
4112 .errstr_unpriv
= "R0 leaks addr",
4113 .result_unpriv
= REJECT
,
4115 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4118 "valid map access into an array with a variable",
4120 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4121 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4122 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4123 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4124 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4125 BPF_FUNC_map_lookup_elem
),
4126 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4127 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4128 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 3),
4129 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4130 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4131 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4132 offsetof(struct test_val
, foo
)),
4135 .fixup_map2
= { 3 },
4136 .errstr_unpriv
= "R0 leaks addr",
4137 .result_unpriv
= REJECT
,
4139 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4142 "valid map access into an array with a signed variable",
4144 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4145 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4146 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4147 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4148 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4149 BPF_FUNC_map_lookup_elem
),
4150 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
4151 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4152 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 0xffffffff, 1),
4153 BPF_MOV32_IMM(BPF_REG_1
, 0),
4154 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
4155 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
4156 BPF_MOV32_IMM(BPF_REG_1
, 0),
4157 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
4158 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4159 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4160 offsetof(struct test_val
, foo
)),
4163 .fixup_map2
= { 3 },
4164 .errstr_unpriv
= "R0 leaks addr",
4165 .result_unpriv
= REJECT
,
4167 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4170 "invalid map access into an array with a constant",
4172 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4173 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4174 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4175 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4176 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4177 BPF_FUNC_map_lookup_elem
),
4178 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4179 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, (MAX_ENTRIES
+ 1) << 2,
4180 offsetof(struct test_val
, foo
)),
4183 .fixup_map2
= { 3 },
4184 .errstr
= "invalid access to map value, value_size=48 off=48 size=8",
4188 "invalid map access into an array with a register",
4190 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4191 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4192 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4193 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4194 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4195 BPF_FUNC_map_lookup_elem
),
4196 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4197 BPF_MOV64_IMM(BPF_REG_1
, MAX_ENTRIES
+ 1),
4198 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4199 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4200 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4201 offsetof(struct test_val
, foo
)),
4204 .fixup_map2
= { 3 },
4205 .errstr
= "R0 min value is outside of the array range",
4207 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4210 "invalid map access into an array with a variable",
4212 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4213 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4214 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4215 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4216 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4217 BPF_FUNC_map_lookup_elem
),
4218 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4219 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4220 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4221 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4222 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4223 offsetof(struct test_val
, foo
)),
4226 .fixup_map2
= { 3 },
4227 .errstr
= "R0 unbounded memory access, make sure to bounds check any array access into a map",
4229 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4232 "invalid map access into an array with no floor check",
4234 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4235 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4236 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4237 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4238 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4239 BPF_FUNC_map_lookup_elem
),
4240 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4241 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
4242 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
4243 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
4244 BPF_MOV32_IMM(BPF_REG_1
, 0),
4245 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
4246 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4247 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4248 offsetof(struct test_val
, foo
)),
4251 .fixup_map2
= { 3 },
4252 .errstr_unpriv
= "R0 leaks addr",
4253 .errstr
= "R0 unbounded memory access",
4254 .result_unpriv
= REJECT
,
4256 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4259 "invalid map access into an array with a invalid max check",
4261 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4262 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4263 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4264 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4265 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4266 BPF_FUNC_map_lookup_elem
),
4267 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4268 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4269 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
+ 1),
4270 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
4271 BPF_MOV32_IMM(BPF_REG_1
, 0),
4272 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
4273 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4274 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4275 offsetof(struct test_val
, foo
)),
4278 .fixup_map2
= { 3 },
4279 .errstr_unpriv
= "R0 leaks addr",
4280 .errstr
= "invalid access to map value, value_size=48 off=44 size=8",
4281 .result_unpriv
= REJECT
,
4283 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4286 "invalid map access into an array with a invalid max check",
4288 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4289 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4290 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4291 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4292 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4293 BPF_FUNC_map_lookup_elem
),
4294 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
4295 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
4296 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4297 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4298 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4299 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4300 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4301 BPF_FUNC_map_lookup_elem
),
4302 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
4303 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
4304 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
4305 offsetof(struct test_val
, foo
)),
4308 .fixup_map2
= { 3, 11 },
4309 .errstr
= "R0 pointer += pointer",
4311 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
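/* The next group of tests exercises PTR_TO_MAP_VALUE_OR_NULL as returned
 * by bpf_map_lookup_elem(): copies of the result are expected to share the
 * NULL check, ALU ops on the maybe-NULL pointer should be rejected before
 * that check, and registers clobbered by a second helper call must not be
 * read afterwards.
 */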
4314 "multiple registers share map_lookup_elem result",
4316 BPF_MOV64_IMM(BPF_REG_1
, 10),
4317 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4318 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4319 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4320 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4321 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4322 BPF_FUNC_map_lookup_elem
),
4323 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4324 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4325 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4328 .fixup_map1
= { 4 },
4330 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4333 "alu ops on ptr_to_map_value_or_null, 1",
4335 BPF_MOV64_IMM(BPF_REG_1
, 10),
4336 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4337 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4338 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4339 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4340 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4341 BPF_FUNC_map_lookup_elem
),
4342 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4343 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -2),
4344 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 2),
4345 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4346 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4349 .fixup_map1
= { 4 },
4350 .errstr
= "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4352 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4355 "alu ops on ptr_to_map_value_or_null, 2",
4357 BPF_MOV64_IMM(BPF_REG_1
, 10),
4358 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4359 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4360 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4361 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4362 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4363 BPF_FUNC_map_lookup_elem
),
4364 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4365 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, -1),
4366 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4367 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4370 .fixup_map1
= { 4 },
4371 .errstr
= "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4373 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4376 "alu ops on ptr_to_map_value_or_null, 3",
4378 BPF_MOV64_IMM(BPF_REG_1
, 10),
4379 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4380 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4381 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4382 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4383 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4384 BPF_FUNC_map_lookup_elem
),
4385 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4386 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_4
, 1),
4387 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4388 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4391 .fixup_map1
= { 4 },
4392 .errstr
= "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4394 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4397 "invalid memory access with multiple map_lookup_elem calls",
4399 BPF_MOV64_IMM(BPF_REG_1
, 10),
4400 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4401 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4402 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4403 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4404 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
4405 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
4406 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4407 BPF_FUNC_map_lookup_elem
),
4408 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4409 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
4410 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
4411 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4412 BPF_FUNC_map_lookup_elem
),
4413 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4414 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4417 .fixup_map1
= { 4 },
4419 .errstr
= "R4 !read_ok",
4420 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4423 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4425 BPF_MOV64_IMM(BPF_REG_1
, 10),
4426 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4427 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4428 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4429 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4430 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
4431 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
4432 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4433 BPF_FUNC_map_lookup_elem
),
4434 BPF_MOV64_IMM(BPF_REG_2
, 10),
4435 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 0, 3),
4436 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
4437 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
4438 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4439 BPF_FUNC_map_lookup_elem
),
4440 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4441 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4442 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4445 .fixup_map1
= { 4 },
4447 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4450 "invalid map access from else condition",
4452 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4453 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4454 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4455 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4456 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
4457 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4458 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4459 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
-1, 1),
4460 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 1),
4461 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4462 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4463 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, offsetof(struct test_val
, foo
)),
4466 .fixup_map2
= { 3 },
4467 .errstr
= "R0 unbounded memory access",
4469 .errstr_unpriv
= "R0 leaks addr",
4470 .result_unpriv
= REJECT
,
4471 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
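/* The following tests check that OR-ing constants into a known scalar keeps
 * it a known constant, and that the result is still validated against the
 * stack buffer passed as a helper argument: 34 | 13 = 47 fits the 48-byte
 * buffer at fp-48, while 34 | 24 = 58 does not (access_size=58 in the
 * error string).
 */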
4474 "constant register |= constant should keep constant type",
4476 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4477 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4478 BPF_MOV64_IMM(BPF_REG_2
, 34),
4479 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 13),
4480 BPF_MOV64_IMM(BPF_REG_3
, 0),
4481 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4485 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4488 "constant register |= constant should not bypass stack boundary checks",
4490 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4491 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4492 BPF_MOV64_IMM(BPF_REG_2
, 34),
4493 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 24),
4494 BPF_MOV64_IMM(BPF_REG_3
, 0),
4495 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4498 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4500 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4503 "constant register |= constant register should keep constant type",
4505 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4506 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4507 BPF_MOV64_IMM(BPF_REG_2
, 34),
4508 BPF_MOV64_IMM(BPF_REG_4
, 13),
4509 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4510 BPF_MOV64_IMM(BPF_REG_3
, 0),
4511 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4515 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4518 "constant register |= constant register should not bypass stack boundary checks",
4520 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4521 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4522 BPF_MOV64_IMM(BPF_REG_2
, 34),
4523 BPF_MOV64_IMM(BPF_REG_4
, 24),
4524 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4525 BPF_MOV64_IMM(BPF_REG_3
, 0),
4526 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4529 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4531 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
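/* Direct packet access tests: LWT_IN and LWT_OUT programs may only read
 * packet data, while LWT_XMIT may also write, and in every case the access
 * must be preceded by the usual data/data_end bounds check.
 */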
4534 "invalid direct packet write for LWT_IN",
4536 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4537 offsetof(struct __sk_buff
, data
)),
4538 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4539 offsetof(struct __sk_buff
, data_end
)),
4540 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4541 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4542 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4543 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4544 BPF_MOV64_IMM(BPF_REG_0
, 0),
4547 .errstr
= "cannot write into packet",
4549 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4552 "invalid direct packet write for LWT_OUT",
4554 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4555 offsetof(struct __sk_buff
, data
)),
4556 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4557 offsetof(struct __sk_buff
, data_end
)),
4558 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4559 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4560 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4561 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4562 BPF_MOV64_IMM(BPF_REG_0
, 0),
4565 .errstr
= "cannot write into packet",
4567 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4570 "direct packet write for LWT_XMIT",
4572 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4573 offsetof(struct __sk_buff
, data
)),
4574 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4575 offsetof(struct __sk_buff
, data_end
)),
4576 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4577 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4578 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4579 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4580 BPF_MOV64_IMM(BPF_REG_0
, 0),
4584 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4587 "direct packet read for LWT_IN",
4589 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4590 offsetof(struct __sk_buff
, data
)),
4591 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4592 offsetof(struct __sk_buff
, data_end
)),
4593 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4594 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4595 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4596 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4597 BPF_MOV64_IMM(BPF_REG_0
, 0),
4601 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4604 "direct packet read for LWT_OUT",
4606 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4607 offsetof(struct __sk_buff
, data
)),
4608 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4609 offsetof(struct __sk_buff
, data_end
)),
4610 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4611 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4612 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4613 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4614 BPF_MOV64_IMM(BPF_REG_0
, 0),
4618 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4621 "direct packet read for LWT_XMIT",
4623 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4624 offsetof(struct __sk_buff
, data
)),
4625 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4626 offsetof(struct __sk_buff
, data_end
)),
4627 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4628 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4629 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4630 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4631 BPF_MOV64_IMM(BPF_REG_0
, 0),
4635 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4638 "overlapping checks for direct packet access",
4640 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4641 offsetof(struct __sk_buff
, data
)),
4642 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4643 offsetof(struct __sk_buff
, data_end
)),
4644 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4645 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4646 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
4647 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
4648 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
4649 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
4650 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
4651 BPF_MOV64_IMM(BPF_REG_0
, 0),
4655 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
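/* __sk_buff->tc_classid is not part of the LWT program context, so any
 * access to it from LWT_IN/LWT_OUT/LWT_XMIT is expected to be flagged as an
 * invalid bpf_context access.
 */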
4658 "invalid access of tc_classid for LWT_IN",
4660 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4661 offsetof(struct __sk_buff
, tc_classid
)),
4665 .errstr
= "invalid bpf_context access",
4668 "invalid access of tc_classid for LWT_OUT",
4670 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4671 offsetof(struct __sk_buff
, tc_classid
)),
4675 .errstr
= "invalid bpf_context access",
4678 "invalid access of tc_classid for LWT_XMIT",
4680 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4681 offsetof(struct __sk_buff
, tc_classid
)),
4685 .errstr
= "invalid bpf_context access",
4688 "leak pointer into ctx 1",
4690 BPF_MOV64_IMM(BPF_REG_0
, 0),
4691 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4692 offsetof(struct __sk_buff
, cb
[0])),
4693 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4694 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4695 offsetof(struct __sk_buff
, cb
[0])),
4698 .fixup_map1
= { 2 },
4699 .errstr_unpriv
= "R2 leaks addr into mem",
4700 .result_unpriv
= REJECT
,
4702 .errstr
= "BPF_XADD stores into R1 context is not allowed",
4705 "leak pointer into ctx 2",
4707 BPF_MOV64_IMM(BPF_REG_0
, 0),
4708 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4709 offsetof(struct __sk_buff
, cb
[0])),
4710 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_10
,
4711 offsetof(struct __sk_buff
, cb
[0])),
4714 .errstr_unpriv
= "R10 leaks addr into mem",
4715 .result_unpriv
= REJECT
,
4717 .errstr
= "BPF_XADD stores into R1 context is not allowed",
4720 "leak pointer into ctx 3",
4722 BPF_MOV64_IMM(BPF_REG_0
, 0),
4723 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4724 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4725 offsetof(struct __sk_buff
, cb
[0])),
4728 .fixup_map1
= { 1 },
4729 .errstr_unpriv
= "R2 leaks addr into ctx",
4730 .result_unpriv
= REJECT
,
4734 "leak pointer into map val",
4736 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
4737 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4738 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4739 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4740 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4741 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4742 BPF_FUNC_map_lookup_elem
),
4743 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
4744 BPF_MOV64_IMM(BPF_REG_3
, 0),
4745 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
4746 BPF_STX_XADD(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
4747 BPF_MOV64_IMM(BPF_REG_0
, 0),
4750 .fixup_map1
= { 4 },
4751 .errstr_unpriv
= "R6 leaks addr into mem",
4752 .result_unpriv
= REJECT
,
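/* "helper access to map" tests: a map value pointer and a size are passed
 * to bpf_probe_read().  Roughly the C equivalent (a sketch only, helper
 * signature as in uapi/linux/bpf.h) is:
 *
 *	val = bpf_map_lookup_elem(&map, &key);
 *	if (val)
 *		bpf_probe_read(val, size, NULL);
 *
 * where "size" may be the full value_size, a prefix, zero, too large, or
 * negative; only in-bounds, non-zero sizes are expected to be accepted.
 */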
4756 "helper access to map: full range",
4758 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4759 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4760 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4761 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4763 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4764 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4765 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
4766 BPF_MOV64_IMM(BPF_REG_3
, 0),
4767 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4770 .fixup_map2
= { 3 },
4772 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4775 "helper access to map: partial range",
4777 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4778 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4779 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4780 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4781 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4782 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4783 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4784 BPF_MOV64_IMM(BPF_REG_2
, 8),
4785 BPF_MOV64_IMM(BPF_REG_3
, 0),
4786 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4789 .fixup_map2
= { 3 },
4791 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4794 "helper access to map: empty range",
4796 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4797 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4798 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4799 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4800 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4801 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
4802 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4803 BPF_MOV64_IMM(BPF_REG_2
, 0),
4804 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
4807 .fixup_map2
= { 3 },
4808 .errstr
= "invalid access to map value, value_size=48 off=0 size=0",
4810 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4813 "helper access to map: out-of-bound range",
4815 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4816 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4817 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4818 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4819 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4820 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4821 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4822 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
) + 8),
4823 BPF_MOV64_IMM(BPF_REG_3
, 0),
4824 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4827 .fixup_map2
= { 3 },
4828 .errstr
= "invalid access to map value, value_size=48 off=0 size=56",
4830 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4833 "helper access to map: negative range",
4835 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4836 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4837 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4838 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4839 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4840 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4841 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4842 BPF_MOV64_IMM(BPF_REG_2
, -8),
4843 BPF_MOV64_IMM(BPF_REG_3
, 0),
4844 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4847 .fixup_map2
= { 3 },
4848 .errstr
= "R2 min value is negative",
4850 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
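/* "helper access to adjusted map (via const imm)" tests: before the helper
 * call the map value pointer is advanced by a constant, roughly (sketch):
 *
 *	bpf_probe_read(val + offsetof(struct test_val, foo), size, NULL);
 *
 * so the verifier must check "size" against the space remaining after the
 * constant offset (off=4 of value_size=48 in the error strings).
 */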
4853 "helper access to adjusted map (via const imm): full range",
4855 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4856 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4857 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4858 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4859 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4860 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4861 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4862 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4863 offsetof(struct test_val
, foo
)),
4864 BPF_MOV64_IMM(BPF_REG_2
,
4865 sizeof(struct test_val
) -
4866 offsetof(struct test_val
, foo
)),
4867 BPF_MOV64_IMM(BPF_REG_3
, 0),
4868 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4871 .fixup_map2
= { 3 },
4873 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4876 "helper access to adjusted map (via const imm): partial range",
4878 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4879 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4880 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4881 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4882 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4883 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4884 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4885 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4886 offsetof(struct test_val
, foo
)),
4887 BPF_MOV64_IMM(BPF_REG_2
, 8),
4888 BPF_MOV64_IMM(BPF_REG_3
, 0),
4889 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4892 .fixup_map2
= { 3 },
4894 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4897 "helper access to adjusted map (via const imm): empty range",
4899 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4900 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4901 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4902 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4903 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4904 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4905 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4906 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4907 offsetof(struct test_val
, foo
)),
4908 BPF_MOV64_IMM(BPF_REG_2
, 0),
4909 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
4912 .fixup_map2
= { 3 },
4913 .errstr
= "invalid access to map value, value_size=48 off=4 size=0",
4915 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4918 "helper access to adjusted map (via const imm): out-of-bound range",
4920 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4921 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4922 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4923 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4924 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4925 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4926 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4927 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4928 offsetof(struct test_val
, foo
)),
4929 BPF_MOV64_IMM(BPF_REG_2
,
4930 sizeof(struct test_val
) -
4931 offsetof(struct test_val
, foo
) + 8),
4932 BPF_MOV64_IMM(BPF_REG_3
, 0),
4933 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4936 .fixup_map2
= { 3 },
4937 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4939 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4942 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4944 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4945 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4946 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4947 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4948 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4949 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4950 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4951 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4952 offsetof(struct test_val
, foo
)),
4953 BPF_MOV64_IMM(BPF_REG_2
, -8),
4954 BPF_MOV64_IMM(BPF_REG_3
, 0),
4955 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4958 .fixup_map2
= { 3 },
4959 .errstr
= "R2 min value is negative",
4961 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4964 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4966 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4967 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4968 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4969 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4970 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4971 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4972 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4973 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4974 offsetof(struct test_val
, foo
)),
4975 BPF_MOV64_IMM(BPF_REG_2
, -1),
4976 BPF_MOV64_IMM(BPF_REG_3
, 0),
4977 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4980 .fixup_map2
= { 3 },
4981 .errstr
= "R2 min value is negative",
4983 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
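/* "via const reg" variants: the same constant offset is added through a
 * register instead of an immediate, which the verifier should track as a
 * known scalar and bound identically.
 */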
4986 "helper access to adjusted map (via const reg): full range",
4988 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4989 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4990 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4991 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4992 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4993 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4994 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4995 BPF_MOV64_IMM(BPF_REG_3
,
4996 offsetof(struct test_val
, foo
)),
4997 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4998 BPF_MOV64_IMM(BPF_REG_2
,
4999 sizeof(struct test_val
) -
5000 offsetof(struct test_val
, foo
)),
5001 BPF_MOV64_IMM(BPF_REG_3
, 0),
5002 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5005 .fixup_map2
= { 3 },
5007 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5010 "helper access to adjusted map (via const reg): partial range",
5012 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5013 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5014 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5015 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5016 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5017 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5018 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5019 BPF_MOV64_IMM(BPF_REG_3
,
5020 offsetof(struct test_val
, foo
)),
5021 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5022 BPF_MOV64_IMM(BPF_REG_2
, 8),
5023 BPF_MOV64_IMM(BPF_REG_3
, 0),
5024 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5027 .fixup_map2
= { 3 },
5029 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5032 "helper access to adjusted map (via const reg): empty range",
5034 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5035 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5036 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5037 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5039 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
5040 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5041 BPF_MOV64_IMM(BPF_REG_3
, 0),
5042 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5043 BPF_MOV64_IMM(BPF_REG_2
, 0),
5044 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
5047 .fixup_map2
= { 3 },
5048 .errstr
= "R1 min value is outside of the array range",
5050 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5053 "helper access to adjusted map (via const reg): out-of-bound range",
5055 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5056 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5057 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5058 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5059 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5060 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5061 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5062 BPF_MOV64_IMM(BPF_REG_3
,
5063 offsetof(struct test_val
, foo
)),
5064 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5065 BPF_MOV64_IMM(BPF_REG_2
,
5066 sizeof(struct test_val
) -
5067 offsetof(struct test_val
, foo
) + 8),
5068 BPF_MOV64_IMM(BPF_REG_3
, 0),
5069 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5072 .fixup_map2
= { 3 },
5073 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
5075 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5078 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5080 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5081 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5082 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5083 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5084 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5085 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5086 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5087 BPF_MOV64_IMM(BPF_REG_3
,
5088 offsetof(struct test_val
, foo
)),
5089 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5090 BPF_MOV64_IMM(BPF_REG_2
, -8),
5091 BPF_MOV64_IMM(BPF_REG_3
, 0),
5092 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5095 .fixup_map2
= { 3 },
5096 .errstr
= "R2 min value is negative",
5098 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5101 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5103 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5104 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5105 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5106 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5107 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5108 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5109 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5110 BPF_MOV64_IMM(BPF_REG_3
,
5111 offsetof(struct test_val
, foo
)),
5112 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5113 BPF_MOV64_IMM(BPF_REG_2
, -1),
5114 BPF_MOV64_IMM(BPF_REG_3
, 0),
5115 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5118 .fixup_map2
= { 3 },
5119 .errstr
= "R2 min value is negative",
5121 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
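/* "via variable" variants: the offset is loaded from the map value at run
 * time, so the program has to clamp it (e.g. JGT against a small constant)
 * before adding it to the pointer; tests with no max check or a wrong max
 * check are expected to be rejected.
 */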
5124 "helper access to adjusted map (via variable): full range",
5126 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5127 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5128 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5129 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5130 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5131 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5132 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5133 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5134 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5135 offsetof(struct test_val
, foo
), 4),
5136 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5137 BPF_MOV64_IMM(BPF_REG_2
,
5138 sizeof(struct test_val
) -
5139 offsetof(struct test_val
, foo
)),
5140 BPF_MOV64_IMM(BPF_REG_3
, 0),
5141 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5144 .fixup_map2
= { 3 },
5146 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5149 "helper access to adjusted map (via variable): partial range",
5151 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5152 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5153 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5154 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5155 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5156 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5157 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5158 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5159 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5160 offsetof(struct test_val
, foo
), 4),
5161 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5162 BPF_MOV64_IMM(BPF_REG_2
, 8),
5163 BPF_MOV64_IMM(BPF_REG_3
, 0),
5164 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5167 .fixup_map2
= { 3 },
5169 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5172 "helper access to adjusted map (via variable): empty range",
5174 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5175 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5176 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5177 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5178 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5179 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5180 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5181 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5182 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5183 offsetof(struct test_val
, foo
), 3),
5184 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5185 BPF_MOV64_IMM(BPF_REG_2
, 0),
5186 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
5189 .fixup_map2
= { 3 },
5190 .errstr
= "R1 min value is outside of the array range",
5192 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5195 "helper access to adjusted map (via variable): no max check",
5197 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5198 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5199 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5200 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5201 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5202 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5203 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5204 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5205 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5206 BPF_MOV64_IMM(BPF_REG_2
, 1),
5207 BPF_MOV64_IMM(BPF_REG_3
, 0),
5208 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5211 .fixup_map2
= { 3 },
5212 .errstr
= "R1 unbounded memory access",
5214 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5217 "helper access to adjusted map (via variable): wrong max check",
5219 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5220 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5221 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5222 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5224 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5225 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5226 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5227 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5228 offsetof(struct test_val
, foo
), 4),
5229 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5230 BPF_MOV64_IMM(BPF_REG_2
,
5231 sizeof(struct test_val
) -
5232 offsetof(struct test_val
, foo
) + 1),
5233 BPF_MOV64_IMM(BPF_REG_3
, 0),
5234 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5237 .fixup_map2
= { 3 },
5238 .errstr
= "invalid access to map value, value_size=48 off=4 size=45",
5240 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
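/* Bounds checks written with <, <=, s< and s<= (JLT/JLE/JSLT/JSLE): in the
 * "good access" variants the pointer arithmetic is only reachable on the
 * branch where the offset is known to be small (and non-negative for the
 * signed forms); the "bad access" variants lack that guarantee and are
 * rejected as unbounded or negative accesses.
 */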
5243 "helper access to map: bounds check using <, good access",
5245 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5246 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5247 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5248 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5249 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5250 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5251 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5252 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5253 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 2),
5254 BPF_MOV64_IMM(BPF_REG_0
, 0),
5256 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5257 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5258 BPF_MOV64_IMM(BPF_REG_0
, 0),
5261 .fixup_map2
= { 3 },
5263 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5266 "helper access to map: bounds check using <, bad access",
5268 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5269 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5270 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5271 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5272 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5273 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5274 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5275 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5276 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 4),
5277 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5278 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5279 BPF_MOV64_IMM(BPF_REG_0
, 0),
5281 BPF_MOV64_IMM(BPF_REG_0
, 0),
5284 .fixup_map2
= { 3 },
5286 .errstr
= "R1 unbounded memory access",
5287 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5290 "helper access to map: bounds check using <=, good access",
5292 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5293 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5294 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5295 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5296 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5297 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5298 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5299 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5300 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 2),
5301 BPF_MOV64_IMM(BPF_REG_0
, 0),
5303 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5304 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5305 BPF_MOV64_IMM(BPF_REG_0
, 0),
5308 .fixup_map2
= { 3 },
5310 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5313 "helper access to map: bounds check using <=, bad access",
5315 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5316 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5317 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5318 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5319 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5320 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5321 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5322 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5323 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 4),
5324 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5325 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5326 BPF_MOV64_IMM(BPF_REG_0
, 0),
5328 BPF_MOV64_IMM(BPF_REG_0
, 0),
5331 .fixup_map2
= { 3 },
5333 .errstr
= "R1 unbounded memory access",
5334 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5337 "helper access to map: bounds check using s<, good access",
5339 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5340 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5341 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5342 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5343 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5344 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5345 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5346 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5347 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
5348 BPF_MOV64_IMM(BPF_REG_0
, 0),
5350 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 0, -3),
5351 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5352 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5353 BPF_MOV64_IMM(BPF_REG_0
, 0),
5356 .fixup_map2
= { 3 },
5358 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5361 "helper access to map: bounds check using s<, good access 2",
5363 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5364 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5365 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5366 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5367 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5368 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5369 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5370 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5371 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
5372 BPF_MOV64_IMM(BPF_REG_0
, 0),
5374 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
5375 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5376 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5377 BPF_MOV64_IMM(BPF_REG_0
, 0),
5380 .fixup_map2
= { 3 },
5382 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5385 "helper access to map: bounds check using s<, bad access",
5387 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5388 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5389 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5390 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5391 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5392 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5393 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5394 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
5395 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
5396 BPF_MOV64_IMM(BPF_REG_0
, 0),
5398 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
5399 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5400 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5401 BPF_MOV64_IMM(BPF_REG_0
, 0),
5404 .fixup_map2
= { 3 },
5406 .errstr
= "R1 min value is negative",
5407 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5410 "helper access to map: bounds check using s<=, good access",
5412 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5413 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5414 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5415 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5416 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5417 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5418 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5419 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5420 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5421 BPF_MOV64_IMM(BPF_REG_0
, 0),
5423 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 0, -3),
5424 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5425 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5426 BPF_MOV64_IMM(BPF_REG_0
, 0),
5429 .fixup_map2
= { 3 },
5431 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5434 "helper access to map: bounds check using s<=, good access 2",
5436 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5437 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5438 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5439 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5440 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5441 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5442 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5443 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5444 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5445 BPF_MOV64_IMM(BPF_REG_0
, 0),
5447 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5448 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5449 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5450 BPF_MOV64_IMM(BPF_REG_0
, 0),
5453 .fixup_map2
= { 3 },
5455 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5458 "helper access to map: bounds check using s<=, bad access",
5460 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5461 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5462 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5463 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5464 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5465 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5466 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5467 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
5468 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5469 BPF_MOV64_IMM(BPF_REG_0
, 0),
5471 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5472 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5473 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5474 BPF_MOV64_IMM(BPF_REG_0
, 0),
5477 .fixup_map2
= { 3 },
5479 .errstr
= "R1 min value is negative",
5480 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
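/* Register spilling tests: a map value pointer (or map_value_or_null)
 * spilled to the stack and filled back must keep its type, while reads
 * through registers clobbered by a helper call must fail.
 */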
5483 "map element value is preserved across register spilling",
5485 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5486 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5487 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5488 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5489 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5490 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5491 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5492 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5493 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5494 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5495 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5496 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5499 .fixup_map2
= { 3 },
5500 .errstr_unpriv
= "R0 leaks addr",
5502 .result_unpriv
= REJECT
,
5505 "map element value or null is marked on register spilling",
5507 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5508 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5509 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5510 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5511 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5512 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5513 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -152),
5514 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5515 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5516 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5517 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5520 .fixup_map2
= { 3 },
5521 .errstr_unpriv
= "R0 leaks addr",
5523 .result_unpriv
= REJECT
,
5526 "map element value store of cleared call register",
5528 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5529 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5530 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5531 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5532 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5533 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
5534 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
5537 .fixup_map2
= { 3 },
5538 .errstr_unpriv
= "R1 !read_ok",
5539 .errstr
= "R1 !read_ok",
5541 .result_unpriv
= REJECT
,
5544 "map element value with unaligned store",
5546 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5547 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5548 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5549 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5550 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5551 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 17),
5552 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5553 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5554 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 2, 43),
5555 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, -2, 44),
5556 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5557 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 32),
5558 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 2, 33),
5559 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -2, 34),
5560 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_8
, 5),
5561 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 22),
5562 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 4, 23),
5563 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -7, 24),
5564 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_8
),
5565 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, 3),
5566 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 0, 22),
5567 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 4, 23),
5568 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, -4, 24),
5571 .fixup_map2
= { 3 },
5572 .errstr_unpriv
= "R0 leaks addr",
5574 .result_unpriv
= REJECT
,
5575 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5578 "map element value with unaligned load",
5580 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5581 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5582 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5583 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5584 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5585 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5586 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
5587 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 9),
5588 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5589 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5590 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 2),
5591 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5592 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 0),
5593 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 2),
5594 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 5),
5595 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5596 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 4),
5599 .fixup_map2
= { 3 },
5600 .errstr_unpriv
= "R0 leaks addr",
5602 .result_unpriv
= REJECT
,
5603 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
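/* "map element value illegal alu op" tests: bitwise AND, 32-bit arithmetic,
 * division, byte swaps and XADD applied to a map value pointer turn it into
 * something the verifier no longer treats as a valid pointer, so the
 * following store is expected to be rejected.
 */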
5606 "map element value illegal alu op, 1",
5608 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5609 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5610 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5611 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5612 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5613 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5614 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 8),
5615 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5618 .fixup_map2
= { 3 },
5619 .errstr
= "R0 bitwise operator &= on pointer",
5623 "map element value illegal alu op, 2",
5625 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5626 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5627 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5628 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5629 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5630 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5631 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_0
, 0),
5632 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5635 .fixup_map2
= { 3 },
5636 .errstr
= "R0 32-bit pointer arithmetic prohibited",
5640 "map element value illegal alu op, 3",
5642 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5643 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5644 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5645 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5646 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5647 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5648 BPF_ALU64_IMM(BPF_DIV
, BPF_REG_0
, 42),
5649 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5652 .fixup_map2
= { 3 },
5653 .errstr
= "R0 pointer arithmetic with /= operator",
5657 "map element value illegal alu op, 4",
5659 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5660 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5661 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5662 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5663 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5664 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5665 BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_0
, 64),
5666 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5669 .fixup_map2
= { 3 },
5670 .errstr_unpriv
= "R0 pointer arithmetic prohibited",
5671 .errstr
= "invalid mem access 'inv'",
5673 .result_unpriv
= REJECT
,
5676 "map element value illegal alu op, 5",
5678 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5679 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5680 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5681 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5682 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5683 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5684 BPF_MOV64_IMM(BPF_REG_3
, 4096),
5685 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5686 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5687 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
5688 BPF_STX_XADD(BPF_DW
, BPF_REG_2
, BPF_REG_3
, 0),
5689 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, 0),
5690 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5693 .fixup_map2
= { 3 },
5694 .errstr
= "R0 invalid mem access 'inv'",
5698 "map element value is preserved across register spilling",
5700 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5701 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5702 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5703 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5704 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5705 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5706 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
,
5707 offsetof(struct test_val
, foo
)),
5708 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5709 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5710 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5711 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5712 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5713 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5716 .fixup_map2
= { 3 },
5717 .errstr_unpriv
= "R0 leaks addr",
5719 .result_unpriv
= REJECT
,
5720 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
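/* "helper access to variable memory" (stack) tests: a stack buffer and a
 * variable size are passed to bpf_probe_read().  The size must be provably
 * within the buffer, via an AND mask or explicit min/max jumps, and the
 * bytes handed to the helper must have been initialized; otherwise the
 * access is rejected.
 */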
5723 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5725 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5726 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5727 BPF_MOV64_IMM(BPF_REG_0
, 0),
5728 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5729 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5730 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5731 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5732 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5733 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5734 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5735 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5736 BPF_MOV64_IMM(BPF_REG_2
, 16),
5737 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5738 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5739 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5740 BPF_MOV64_IMM(BPF_REG_4
, 0),
5741 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5742 BPF_MOV64_IMM(BPF_REG_3
, 0),
5743 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5744 BPF_MOV64_IMM(BPF_REG_0
, 0),
5748 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5751 "helper access to variable memory: stack, bitwise AND, zero included",
5753 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5754 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5755 BPF_MOV64_IMM(BPF_REG_2
, 16),
5756 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5757 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5758 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5759 BPF_MOV64_IMM(BPF_REG_3
, 0),
5760 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5763 .errstr
= "invalid indirect read from stack off -64+0 size 64",
5765 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5768 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5770 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5771 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5772 BPF_MOV64_IMM(BPF_REG_2
, 16),
5773 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5774 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5775 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 65),
5776 BPF_MOV64_IMM(BPF_REG_4
, 0),
5777 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5778 BPF_MOV64_IMM(BPF_REG_3
, 0),
5779 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5780 BPF_MOV64_IMM(BPF_REG_0
, 0),
5783 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5785 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5788 "helper access to variable memory: stack, JMP, correct bounds",
5790 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5791 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5792 BPF_MOV64_IMM(BPF_REG_0
, 0),
5793 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5794 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5795 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5796 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5797 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5798 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5799 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5800 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5801 BPF_MOV64_IMM(BPF_REG_2
, 16),
5802 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5803 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5804 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 4),
5805 BPF_MOV64_IMM(BPF_REG_4
, 0),
5806 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5807 BPF_MOV64_IMM(BPF_REG_3
, 0),
5808 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5809 BPF_MOV64_IMM(BPF_REG_0
, 0),
5813 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5816 "helper access to variable memory: stack, JMP (signed), correct bounds",
5818 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5819 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5820 BPF_MOV64_IMM(BPF_REG_0
, 0),
5821 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5822 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5823 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5824 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5825 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5826 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5827 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5828 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5829 BPF_MOV64_IMM(BPF_REG_2
, 16),
5830 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5831 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5832 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 4),
5833 BPF_MOV64_IMM(BPF_REG_4
, 0),
5834 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5835 BPF_MOV64_IMM(BPF_REG_3
, 0),
5836 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5837 BPF_MOV64_IMM(BPF_REG_0
, 0),
5841 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5844 "helper access to variable memory: stack, JMP, bounds + offset",
5846 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5847 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5848 BPF_MOV64_IMM(BPF_REG_2
, 16),
5849 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5850 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5851 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 5),
5852 BPF_MOV64_IMM(BPF_REG_4
, 0),
5853 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 3),
5854 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5855 BPF_MOV64_IMM(BPF_REG_3
, 0),
5856 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5857 BPF_MOV64_IMM(BPF_REG_0
, 0),
5860 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5862 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5865 "helper access to variable memory: stack, JMP, wrong max",
5867 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5868 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5869 BPF_MOV64_IMM(BPF_REG_2
, 16),
5870 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5871 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5872 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 65, 4),
5873 BPF_MOV64_IMM(BPF_REG_4
, 0),
5874 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5875 BPF_MOV64_IMM(BPF_REG_3
, 0),
5876 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5877 BPF_MOV64_IMM(BPF_REG_0
, 0),
5880 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5882 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5885 "helper access to variable memory: stack, JMP, no max check",
5887 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5888 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5889 BPF_MOV64_IMM(BPF_REG_2
, 16),
5890 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5891 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5892 BPF_MOV64_IMM(BPF_REG_4
, 0),
5893 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5894 BPF_MOV64_IMM(BPF_REG_3
, 0),
5895 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5896 BPF_MOV64_IMM(BPF_REG_0
, 0),
5899 /* because max wasn't checked, signed min is negative */
5900 .errstr
= "R2 min value is negative, either use unsigned or 'var &= const'",
5902 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5905 "helper access to variable memory: stack, JMP, no min check",
5907 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5908 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5909 BPF_MOV64_IMM(BPF_REG_2
, 16),
5910 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5911 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5912 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 3),
5913 BPF_MOV64_IMM(BPF_REG_3
, 0),
5914 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5915 BPF_MOV64_IMM(BPF_REG_0
, 0),
5918 .errstr
= "invalid indirect read from stack off -64+0 size 64",
5920 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5923 "helper access to variable memory: stack, JMP (signed), no min check",
5925 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5926 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5927 BPF_MOV64_IMM(BPF_REG_2
, 16),
5928 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5929 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5930 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 3),
5931 BPF_MOV64_IMM(BPF_REG_3
, 0),
5932 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5933 BPF_MOV64_IMM(BPF_REG_0
, 0),
5936 .errstr
= "R2 min value is negative",
5938 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5941 "helper access to variable memory: map, JMP, correct bounds",
5943 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5944 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5945 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5946 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5947 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5948 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5949 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5950 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5951 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5952 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5953 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5954 sizeof(struct test_val
), 4),
5955 BPF_MOV64_IMM(BPF_REG_4
, 0),
5956 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5957 BPF_MOV64_IMM(BPF_REG_3
, 0),
5958 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5959 BPF_MOV64_IMM(BPF_REG_0
, 0),
5962 .fixup_map2
= { 3 },
5964 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5967 "helper access to variable memory: map, JMP, wrong max",
5969 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5970 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5971 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5972 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5973 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5974 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5975 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5976 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5977 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5978 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5979 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5980 sizeof(struct test_val
) + 1, 4),
5981 BPF_MOV64_IMM(BPF_REG_4
, 0),
5982 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5983 BPF_MOV64_IMM(BPF_REG_3
, 0),
5984 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5985 BPF_MOV64_IMM(BPF_REG_0
, 0),
5988 .fixup_map2
= { 3 },
5989 .errstr
= "invalid access to map value, value_size=48 off=0 size=49",
5991 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5994 "helper access to variable memory: map adjusted, JMP, correct bounds",
5996 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5997 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5998 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5999 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6000 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6001 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
6002 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6003 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
6004 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
6005 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
6006 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
6007 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
6008 sizeof(struct test_val
) - 20, 4),
6009 BPF_MOV64_IMM(BPF_REG_4
, 0),
6010 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
6011 BPF_MOV64_IMM(BPF_REG_3
, 0),
6012 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6013 BPF_MOV64_IMM(BPF_REG_0
, 0),
6016 .fixup_map2
= { 3 },
6018 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6021 "helper access to variable memory: map adjusted, JMP, wrong max",
6023 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6024 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6025 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
6026 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6027 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6028 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
6029 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6030 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
6031 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
6032 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
6033 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
6034 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
6035 sizeof(struct test_val
) - 19, 4),
6036 BPF_MOV64_IMM(BPF_REG_4
, 0),
6037 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
6038 BPF_MOV64_IMM(BPF_REG_3
, 0),
6039 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6040 BPF_MOV64_IMM(BPF_REG_0
, 0),
6043 .fixup_map2
= { 3 },
6044 .errstr
= "R1 min value is outside of the array range",
6046 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
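/* ARG_PTR_TO_MEM_OR_NULL tests (bpf_csum_diff): a NULL buffer is only
 * acceptable together with size 0, while a non-NULL stack or map pointer
 * may be paired with a zero or possibly-zero size.
 */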
6049 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6051 BPF_MOV64_IMM(BPF_REG_1
, 0),
6052 BPF_MOV64_IMM(BPF_REG_2
, 0),
6053 BPF_MOV64_IMM(BPF_REG_3
, 0),
6054 BPF_MOV64_IMM(BPF_REG_4
, 0),
6055 BPF_MOV64_IMM(BPF_REG_5
, 0),
6056 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6060 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
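/* As the test name indicates, bpf_csum_diff() takes its buffers as
 * ARG_PTR_TO_MEM_OR_NULL, so a NULL pointer is acceptable as long as the
 * paired size argument is provably zero; the next test shows that a possibly
 * non-zero size with a NULL pointer is rejected instead.
 */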
6063 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6065 BPF_MOV64_IMM(BPF_REG_1
, 0),
6066 BPF_MOV64_IMM(BPF_REG_2
, 1),
6067 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
6068 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
6069 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
6070 BPF_MOV64_IMM(BPF_REG_3
, 0),
6071 BPF_MOV64_IMM(BPF_REG_4
, 0),
6072 BPF_MOV64_IMM(BPF_REG_5
, 0),
6073 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6076 .errstr
= "R1 type=inv expected=fp",
6078 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6081 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6083 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6084 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
6085 BPF_MOV64_IMM(BPF_REG_2
, 0),
6086 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, 0),
6087 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 8),
6088 BPF_MOV64_IMM(BPF_REG_3
, 0),
6089 BPF_MOV64_IMM(BPF_REG_4
, 0),
6090 BPF_MOV64_IMM(BPF_REG_5
, 0),
6091 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6095 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6098 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6100 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6101 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6102 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6103 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6104 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6105 BPF_FUNC_map_lookup_elem
),
6106 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6107 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6108 BPF_MOV64_IMM(BPF_REG_2
, 0),
6109 BPF_MOV64_IMM(BPF_REG_3
, 0),
6110 BPF_MOV64_IMM(BPF_REG_4
, 0),
6111 BPF_MOV64_IMM(BPF_REG_5
, 0),
6112 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6115 .fixup_map1
= { 3 },
6117 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6120 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6122 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6123 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6124 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6125 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6126 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6127 BPF_FUNC_map_lookup_elem
),
6128 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6129 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6130 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 7),
6131 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6132 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
6133 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, 0),
6134 BPF_MOV64_IMM(BPF_REG_3
, 0),
6135 BPF_MOV64_IMM(BPF_REG_4
, 0),
6136 BPF_MOV64_IMM(BPF_REG_5
, 0),
6137 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6140 .fixup_map1
= { 3 },
6142 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6145 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6147 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6148 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6149 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6150 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6151 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6152 BPF_FUNC_map_lookup_elem
),
6153 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6154 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6155 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6156 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 4),
6157 BPF_MOV64_IMM(BPF_REG_3
, 0),
6158 BPF_MOV64_IMM(BPF_REG_4
, 0),
6159 BPF_MOV64_IMM(BPF_REG_5
, 0),
6160 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6163 .fixup_map1
= { 3 },
6165 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6168 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
6170 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
6171 offsetof(struct __sk_buff
, data
)),
6172 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
6173 offsetof(struct __sk_buff
, data_end
)),
6174 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_6
),
6175 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
6176 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 7),
6177 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
6178 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 0),
6179 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 4),
6180 BPF_MOV64_IMM(BPF_REG_3
, 0),
6181 BPF_MOV64_IMM(BPF_REG_4
, 0),
6182 BPF_MOV64_IMM(BPF_REG_5
, 0),
6183 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6187 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6188 .retval
= 0 /* csum_diff of 64-byte packet */,
6191 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6193 BPF_MOV64_IMM(BPF_REG_1
, 0),
6194 BPF_MOV64_IMM(BPF_REG_2
, 0),
6195 BPF_MOV64_IMM(BPF_REG_3
, 0),
6196 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6199 .errstr
= "R1 type=inv expected=fp",
6201 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6204 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6206 BPF_MOV64_IMM(BPF_REG_1
, 0),
6207 BPF_MOV64_IMM(BPF_REG_2
, 1),
6208 BPF_MOV64_IMM(BPF_REG_3
, 0),
6209 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6212 .errstr
= "R1 type=inv expected=fp",
6214 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
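/* Counterpart of the csum_diff tests above: bpf_probe_read()'s destination is
 * not an *_OR_NULL argument, so a NULL pointer is rejected even when the size
 * is zero, as the two tests above exercise.
 */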
6217 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6219 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6220 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
6221 BPF_MOV64_IMM(BPF_REG_2
, 0),
6222 BPF_MOV64_IMM(BPF_REG_3
, 0),
6223 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6227 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6230 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6232 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6233 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6234 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6235 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6236 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6237 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6238 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6239 BPF_MOV64_IMM(BPF_REG_2
, 0),
6240 BPF_MOV64_IMM(BPF_REG_3
, 0),
6241 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6244 .fixup_map1
= { 3 },
6246 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6249 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6251 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6252 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6253 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6254 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6255 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6256 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6257 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6258 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 4),
6259 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6260 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
6261 BPF_MOV64_IMM(BPF_REG_3
, 0),
6262 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6265 .fixup_map1
= { 3 },
6267 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6270 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6272 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6273 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6274 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6275 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6276 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6277 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
6278 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6279 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6280 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 2),
6281 BPF_MOV64_IMM(BPF_REG_3
, 0),
6282 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6285 .fixup_map1
= { 3 },
6287 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6290 "helper access to variable memory: 8 bytes leak",
6292 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6293 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
6294 BPF_MOV64_IMM(BPF_REG_0
, 0),
6295 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
6296 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
6297 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
6298 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
6299 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
6300 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
6301 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
6302 BPF_MOV64_IMM(BPF_REG_2
, 1),
6303 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
6304 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
6305 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 63),
6306 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
6307 BPF_MOV64_IMM(BPF_REG_3
, 0),
6308 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6309 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6312 .errstr
= "invalid indirect read from stack off -64+32 size 64",
6314 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
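/* The stores above deliberately skip fp-32, leaving an 8-byte uninitialized
 * hole inside the 64-byte buffer handed to the helper; the "off -64+32" in
 * the error string points at exactly that hole, unlike the fully initialized
 * "no leak" variant that follows.
 */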
6317 "helper access to variable memory: 8 bytes no leak (init memory)",
6319 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6320 BPF_MOV64_IMM(BPF_REG_0
, 0),
6321 BPF_MOV64_IMM(BPF_REG_0
, 0),
6322 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
6323 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
6324 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
6325 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
6326 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
6327 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
6328 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
6329 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
6330 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
6331 BPF_MOV64_IMM(BPF_REG_2
, 0),
6332 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 32),
6333 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 32),
6334 BPF_MOV64_IMM(BPF_REG_3
, 0),
6335 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6336 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6340 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6343 "invalid and of negative number",
6345 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6346 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6347 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6348 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6349 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6350 BPF_FUNC_map_lookup_elem
),
6351 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6352 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6353 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, -4),
6354 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
6355 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6356 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
6357 offsetof(struct test_val
, foo
)),
6360 .fixup_map2
= { 3 },
6361 .errstr
= "R0 max value is outside of the array range",
6363 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
6366 "invalid range check",
6368 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6369 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6370 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6371 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6372 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6373 BPF_FUNC_map_lookup_elem
),
6374 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 12),
6375 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
6376 BPF_MOV64_IMM(BPF_REG_9
, 1),
6377 BPF_ALU32_IMM(BPF_MOD
, BPF_REG_1
, 2),
6378 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 1),
6379 BPF_ALU32_REG(BPF_AND
, BPF_REG_9
, BPF_REG_1
),
6380 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_9
, 1),
6381 BPF_ALU32_IMM(BPF_RSH
, BPF_REG_9
, 1),
6382 BPF_MOV32_IMM(BPF_REG_3
, 1),
6383 BPF_ALU32_REG(BPF_SUB
, BPF_REG_3
, BPF_REG_9
),
6384 BPF_ALU32_IMM(BPF_MUL
, BPF_REG_3
, 0x10000000),
6385 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
6386 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_3
, 0),
6387 BPF_MOV64_REG(BPF_REG_0
, 0),
6390 .fixup_map2
= { 3 },
6391 .errstr
= "R0 max value is outside of the array range",
6393 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
6396 "map in map access",
6398 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6399 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6400 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6401 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6402 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6403 BPF_FUNC_map_lookup_elem
),
6404 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
6405 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6406 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6407 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6408 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6409 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6410 BPF_FUNC_map_lookup_elem
),
6411 BPF_MOV64_REG(BPF_REG_0
, 0),
6414 .fixup_map_in_map
= { 3 },
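/* Map-in-map pattern: the first lookup on the outer map yields a pointer to
 * an inner map, which (after the NULL check) is passed as the map argument of
 * the second lookup. The two tests that follow probe what happens when that
 * inner pointer is modified or used without a NULL check.
 */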
6418 "invalid inner map pointer",
6420 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6421 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6422 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6423 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6424 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6425 BPF_FUNC_map_lookup_elem
),
6426 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6427 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6428 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6429 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6430 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6431 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
6432 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6433 BPF_FUNC_map_lookup_elem
),
6434 BPF_MOV64_REG(BPF_REG_0
, 0),
6437 .fixup_map_in_map
= { 3 },
6438 .errstr
= "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6442 "forgot null checking on the inner map pointer",
6444 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6445 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6446 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6447 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6448 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6449 BPF_FUNC_map_lookup_elem
),
6450 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6451 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6452 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6453 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6454 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6455 BPF_FUNC_map_lookup_elem
),
6456 BPF_MOV64_REG(BPF_REG_0
, 0),
6459 .fixup_map_in_map
= { 3 },
6460 .errstr
= "R1 type=map_value_or_null expected=map_ptr",
6464 "ld_abs: check calling conv, r1",
6466 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6467 BPF_MOV64_IMM(BPF_REG_1
, 0),
6468 BPF_LD_ABS(BPF_W
, -0x200000),
6469 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
6472 .errstr
= "R1 !read_ok",
6476 "ld_abs: check calling conv, r2",
6478 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6479 BPF_MOV64_IMM(BPF_REG_2
, 0),
6480 BPF_LD_ABS(BPF_W
, -0x200000),
6481 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
6484 .errstr
= "R2 !read_ok",
6488 "ld_abs: check calling conv, r3",
6490 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6491 BPF_MOV64_IMM(BPF_REG_3
, 0),
6492 BPF_LD_ABS(BPF_W
, -0x200000),
6493 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
6496 .errstr
= "R3 !read_ok",
6500 "ld_abs: check calling conv, r4",
6502 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6503 BPF_MOV64_IMM(BPF_REG_4
, 0),
6504 BPF_LD_ABS(BPF_W
, -0x200000),
6505 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
6508 .errstr
= "R4 !read_ok",
6512 "ld_abs: check calling conv, r5",
6514 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6515 BPF_MOV64_IMM(BPF_REG_5
, 0),
6516 BPF_LD_ABS(BPF_W
, -0x200000),
6517 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
6520 .errstr
= "R5 !read_ok",
6524 "ld_abs: check calling conv, r7",
6526 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6527 BPF_MOV64_IMM(BPF_REG_7
, 0),
6528 BPF_LD_ABS(BPF_W
, -0x200000),
6529 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
6535 "ld_abs: tests on r6 and skb data reload helper",
6537 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6538 BPF_LD_ABS(BPF_B
, 0),
6539 BPF_LD_ABS(BPF_H
, 0),
6540 BPF_LD_ABS(BPF_W
, 0),
6541 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_6
),
6542 BPF_MOV64_IMM(BPF_REG_6
, 0),
6543 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
6544 BPF_MOV64_IMM(BPF_REG_2
, 1),
6545 BPF_MOV64_IMM(BPF_REG_3
, 2),
6546 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6547 BPF_FUNC_skb_vlan_push
),
6548 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_7
),
6549 BPF_LD_ABS(BPF_B
, 0),
6550 BPF_LD_ABS(BPF_H
, 0),
6551 BPF_LD_ABS(BPF_W
, 0),
6552 BPF_MOV64_IMM(BPF_REG_0
, 42),
6555 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6557 .retval
= 42 /* ultimate return value */,
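/* bpf_skb_vlan_push() can move the packet data, so the saved skb pointer in
 * R7 is copied back into R6 before the second group of LD_ABS instructions;
 * this checks that reloading skb data through R6 after such a helper call is
 * handled correctly and the program still returns 42.
 */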
6560 "ld_ind: check calling conv, r1",
6562 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6563 BPF_MOV64_IMM(BPF_REG_1
, 1),
6564 BPF_LD_IND(BPF_W
, BPF_REG_1
, -0x200000),
6565 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
6568 .errstr
= "R1 !read_ok",
6572 "ld_ind: check calling conv, r2",
6574 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6575 BPF_MOV64_IMM(BPF_REG_2
, 1),
6576 BPF_LD_IND(BPF_W
, BPF_REG_2
, -0x200000),
6577 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
6580 .errstr
= "R2 !read_ok",
6584 "ld_ind: check calling conv, r3",
6586 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6587 BPF_MOV64_IMM(BPF_REG_3
, 1),
6588 BPF_LD_IND(BPF_W
, BPF_REG_3
, -0x200000),
6589 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
6592 .errstr
= "R3 !read_ok",
6596 "ld_ind: check calling conv, r4",
6598 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6599 BPF_MOV64_IMM(BPF_REG_4
, 1),
6600 BPF_LD_IND(BPF_W
, BPF_REG_4
, -0x200000),
6601 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
6604 .errstr
= "R4 !read_ok",
6608 "ld_ind: check calling conv, r5",
6610 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6611 BPF_MOV64_IMM(BPF_REG_5
, 1),
6612 BPF_LD_IND(BPF_W
, BPF_REG_5
, -0x200000),
6613 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
6616 .errstr
= "R5 !read_ok",
6620 "ld_ind: check calling conv, r7",
6622 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6623 BPF_MOV64_IMM(BPF_REG_7
, 1),
6624 BPF_LD_IND(BPF_W
, BPF_REG_7
, -0x200000),
6625 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
6632 "check bpf_perf_event_data->sample_period byte load permitted",
6634 BPF_MOV64_IMM(BPF_REG_0
, 0),
6635 #if __BYTE_ORDER == __LITTLE_ENDIAN
6636 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6637 offsetof(struct bpf_perf_event_data
, sample_period
)),
6639 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6640 offsetof(struct bpf_perf_event_data
, sample_period
) + 7),
6645 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
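/* The conditional picks the least-significant byte of the 64-bit
 * sample_period field: offset +0 on little-endian, +7 on big-endian. The
 * half-word and word variants that follow use +6 and +4 for the same reason.
 */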
6648 "check bpf_perf_event_data->sample_period half load permitted",
6650 BPF_MOV64_IMM(BPF_REG_0
, 0),
6651 #if __BYTE_ORDER == __LITTLE_ENDIAN
6652 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6653 offsetof(struct bpf_perf_event_data
, sample_period
)),
6655 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6656 offsetof(struct bpf_perf_event_data
, sample_period
) + 6),
6661 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6664 "check bpf_perf_event_data->sample_period word load permitted",
6666 BPF_MOV64_IMM(BPF_REG_0
, 0),
6667 #if __BYTE_ORDER == __LITTLE_ENDIAN
6668 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6669 offsetof(struct bpf_perf_event_data
, sample_period
)),
6671 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6672 offsetof(struct bpf_perf_event_data
, sample_period
) + 4),
6677 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6680 "check bpf_perf_event_data->sample_period dword load permitted",
6682 BPF_MOV64_IMM(BPF_REG_0
, 0),
6683 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
6684 offsetof(struct bpf_perf_event_data
, sample_period
)),
6688 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6691 "check skb->data half load not permitted",
6693 BPF_MOV64_IMM(BPF_REG_0
, 0),
6694 #if __BYTE_ORDER == __LITTLE_ENDIAN
6695 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6696 offsetof(struct __sk_buff
, data
)),
6698 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6699 offsetof(struct __sk_buff
, data
) + 2),
6704 .errstr
= "invalid bpf_context access",
6707 "check skb->tc_classid half load not permitted for lwt prog",
6709 BPF_MOV64_IMM(BPF_REG_0
, 0),
6710 #if __BYTE_ORDER == __LITTLE_ENDIAN
6711 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6712 offsetof(struct __sk_buff
, tc_classid
)),
6714 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6715 offsetof(struct __sk_buff
, tc_classid
) + 2),
6720 .errstr
= "invalid bpf_context access",
6721 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6724 "bounds checks mixing signed and unsigned, positive bounds",
6726 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6727 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6728 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6729 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6730 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6731 BPF_FUNC_map_lookup_elem
),
6732 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6733 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6734 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6735 BPF_MOV64_IMM(BPF_REG_2
, 2),
6736 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 3),
6737 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 4, 2),
6738 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6739 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6740 BPF_MOV64_IMM(BPF_REG_0
, 0),
6743 .fixup_map1
= { 3 },
6744 .errstr
= "unbounded min value",
6748 "bounds checks mixing signed and unsigned",
6750 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6751 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6752 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6753 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6754 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6755 BPF_FUNC_map_lookup_elem
),
6756 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6757 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6758 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6759 BPF_MOV64_IMM(BPF_REG_2
, -1),
6760 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6761 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6762 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6763 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6764 BPF_MOV64_IMM(BPF_REG_0
, 0),
6767 .fixup_map1
= { 3 },
6768 .errstr
= "unbounded min value",
6772 "bounds checks mixing signed and unsigned, variant 2",
6774 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6775 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6776 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6777 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6778 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6779 BPF_FUNC_map_lookup_elem
),
6780 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6781 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6782 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6783 BPF_MOV64_IMM(BPF_REG_2
, -1),
6784 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6785 BPF_MOV64_IMM(BPF_REG_8
, 0),
6786 BPF_ALU64_REG(BPF_ADD
, BPF_REG_8
, BPF_REG_1
),
6787 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6788 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6789 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6790 BPF_MOV64_IMM(BPF_REG_0
, 0),
6793 .fixup_map1
= { 3 },
6794 .errstr
= "unbounded min value",
6798 "bounds checks mixing signed and unsigned, variant 3",
6800 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6801 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6802 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6803 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6804 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6805 BPF_FUNC_map_lookup_elem
),
6806 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6807 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6808 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6809 BPF_MOV64_IMM(BPF_REG_2
, -1),
6810 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 4),
6811 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
6812 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6813 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6814 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6815 BPF_MOV64_IMM(BPF_REG_0
, 0),
6818 .fixup_map1
= { 3 },
6819 .errstr
= "unbounded min value",
6823 "bounds checks mixing signed and unsigned, variant 4",
6825 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6826 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6827 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6828 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6829 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6830 BPF_FUNC_map_lookup_elem
),
6831 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6832 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6833 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6834 BPF_MOV64_IMM(BPF_REG_2
, 1),
6835 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
6836 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6837 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6838 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6839 BPF_MOV64_IMM(BPF_REG_0
, 0),
6842 .fixup_map1
= { 3 },
6846 "bounds checks mixing signed and unsigned, variant 5",
6848 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6849 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6850 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6851 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6852 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6853 BPF_FUNC_map_lookup_elem
),
6854 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6855 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6856 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6857 BPF_MOV64_IMM(BPF_REG_2
, -1),
6858 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6859 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 4),
6860 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 4),
6861 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
6862 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6863 BPF_MOV64_IMM(BPF_REG_0
, 0),
6866 .fixup_map1
= { 3 },
6867 .errstr
= "unbounded min value",
6871 "bounds checks mixing signed and unsigned, variant 6",
6873 BPF_MOV64_IMM(BPF_REG_2
, 0),
6874 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_10
),
6875 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, -512),
6876 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6877 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -16),
6878 BPF_MOV64_IMM(BPF_REG_6
, -1),
6879 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_6
, 5),
6880 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_4
, 1, 4),
6881 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 1),
6882 BPF_MOV64_IMM(BPF_REG_5
, 0),
6883 BPF_ST_MEM(BPF_H
, BPF_REG_10
, -512, 0),
6884 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6885 BPF_FUNC_skb_load_bytes
),
6886 BPF_MOV64_IMM(BPF_REG_0
, 0),
6889 .errstr
= "R4 min value is negative, either use unsigned",
6893 "bounds checks mixing signed and unsigned, variant 7",
6895 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6896 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6897 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6898 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6899 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6900 BPF_FUNC_map_lookup_elem
),
6901 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6902 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6903 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6904 BPF_MOV64_IMM(BPF_REG_2
, 1024 * 1024 * 1024),
6905 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6906 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6907 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6908 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6909 BPF_MOV64_IMM(BPF_REG_0
, 0),
6912 .fixup_map1
= { 3 },
6916 "bounds checks mixing signed and unsigned, variant 8",
6918 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6919 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6920 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6921 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6922 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6923 BPF_FUNC_map_lookup_elem
),
6924 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6925 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6926 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6927 BPF_MOV64_IMM(BPF_REG_2
, -1),
6928 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6929 BPF_MOV64_IMM(BPF_REG_0
, 0),
6931 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6932 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6933 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6934 BPF_MOV64_IMM(BPF_REG_0
, 0),
6937 .fixup_map1
= { 3 },
6938 .errstr
= "unbounded min value",
6942 "bounds checks mixing signed and unsigned, variant 9",
6944 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6945 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6946 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6947 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6948 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6949 BPF_FUNC_map_lookup_elem
),
6950 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
6951 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6952 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6953 BPF_LD_IMM64(BPF_REG_2
, -9223372036854775808ULL),
6954 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6955 BPF_MOV64_IMM(BPF_REG_0
, 0),
6957 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6958 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6959 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6960 BPF_MOV64_IMM(BPF_REG_0
, 0),
6963 .fixup_map1
= { 3 },
6967 "bounds checks mixing signed and unsigned, variant 10",
6969 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6970 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6971 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6972 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6973 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6974 BPF_FUNC_map_lookup_elem
),
6975 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6976 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6977 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6978 BPF_MOV64_IMM(BPF_REG_2
, 0),
6979 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6980 BPF_MOV64_IMM(BPF_REG_0
, 0),
6982 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6983 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6984 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6985 BPF_MOV64_IMM(BPF_REG_0
, 0),
6988 .fixup_map1
= { 3 },
6989 .errstr
= "unbounded min value",
6993 "bounds checks mixing signed and unsigned, variant 11",
6995 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6996 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6997 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6998 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6999 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7000 BPF_FUNC_map_lookup_elem
),
7001 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7002 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
7003 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
7004 BPF_MOV64_IMM(BPF_REG_2
, -1),
7005 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
7007 BPF_MOV64_IMM(BPF_REG_0
, 0),
7009 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
7010 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7011 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
7012 BPF_MOV64_IMM(BPF_REG_0
, 0),
7015 .fixup_map1
= { 3 },
7016 .errstr
= "unbounded min value",
7020 "bounds checks mixing signed and unsigned, variant 12",
7022 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7023 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7024 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7025 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7026 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7027 BPF_FUNC_map_lookup_elem
),
7028 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7029 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
7030 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
7031 BPF_MOV64_IMM(BPF_REG_2
, -6),
7032 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
7033 BPF_MOV64_IMM(BPF_REG_0
, 0),
7035 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
7036 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7037 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
7038 BPF_MOV64_IMM(BPF_REG_0
, 0),
7041 .fixup_map1
= { 3 },
7042 .errstr
= "unbounded min value",
7046 "bounds checks mixing signed and unsigned, variant 13",
7048 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7049 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7050 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7051 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7052 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7053 BPF_FUNC_map_lookup_elem
),
7054 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
7055 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
7056 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
7057 BPF_MOV64_IMM(BPF_REG_2
, 2),
7058 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
7059 BPF_MOV64_IMM(BPF_REG_7
, 1),
7060 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 0, 2),
7061 BPF_MOV64_IMM(BPF_REG_0
, 0),
7063 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_1
),
7064 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 4, 2),
7065 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_7
),
7066 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
7067 BPF_MOV64_IMM(BPF_REG_0
, 0),
7070 .fixup_map1
= { 3 },
7071 .errstr
= "unbounded min value",
7075 "bounds checks mixing signed and unsigned, variant 14",
7077 BPF_LDX_MEM(BPF_W
, BPF_REG_9
, BPF_REG_1
,
7078 offsetof(struct __sk_buff
, mark
)),
7079 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7080 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7081 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7082 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7083 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7084 BPF_FUNC_map_lookup_elem
),
7085 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
7086 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
7087 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
7088 BPF_MOV64_IMM(BPF_REG_2
, -1),
7089 BPF_MOV64_IMM(BPF_REG_8
, 2),
7090 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_9
, 42, 6),
7091 BPF_JMP_REG(BPF_JSGT
, BPF_REG_8
, BPF_REG_1
, 3),
7092 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
7093 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7094 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
7095 BPF_MOV64_IMM(BPF_REG_0
, 0),
7097 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, -3),
7098 BPF_JMP_IMM(BPF_JA
, 0, 0, -7),
7100 .fixup_map1
= { 4 },
7101 .errstr
= "R0 invalid mem access 'inv'",
7105 "bounds checks mixing signed and unsigned, variant 15",
7107 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7108 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7109 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7110 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7111 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7112 BPF_FUNC_map_lookup_elem
),
7113 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7114 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
7115 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
7116 BPF_MOV64_IMM(BPF_REG_2
, -6),
7117 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
7118 BPF_MOV64_IMM(BPF_REG_0
, 0),
7120 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7121 BPF_JMP_IMM(BPF_JGT
, BPF_REG_0
, 1, 2),
7122 BPF_MOV64_IMM(BPF_REG_0
, 0),
7124 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
7125 BPF_MOV64_IMM(BPF_REG_0
, 0),
7128 .fixup_map1
= { 3 },
7129 .errstr
= "unbounded min value",
7131 .result_unpriv
= REJECT
,
7134 "subtraction bounds (map value) variant 1",
7136 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7137 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7138 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7139 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7140 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7141 BPF_FUNC_map_lookup_elem
),
7142 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7143 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7144 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 7),
7145 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
7146 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 5),
7147 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
7148 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 56),
7149 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7150 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7152 BPF_MOV64_IMM(BPF_REG_0
, 0),
7155 .fixup_map1
= { 3 },
7156 .errstr
= "R0 max value is outside of the array range",
7160 "subtraction bounds (map value) variant 2",
7162 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7163 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7164 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7165 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7166 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7167 BPF_FUNC_map_lookup_elem
),
7168 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
7169 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7170 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 6),
7171 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
7172 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 4),
7173 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
7174 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7175 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7177 BPF_MOV64_IMM(BPF_REG_0
, 0),
7180 .fixup_map1
= { 3 },
7181 .errstr
= "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7185 "bounds check based on zero-extended MOV",
7187 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7188 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7189 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7190 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7191 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7192 BPF_FUNC_map_lookup_elem
),
7193 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7194 /* r2 = 0x0000'0000'ffff'ffff */
7195 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff),
7197 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 32),
7199 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
7200 /* access at offset 0 */
7201 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7203 BPF_MOV64_IMM(BPF_REG_0
, 0),
7206 .fixup_map1
= { 3 },
7210 "bounds check based on sign-extended MOV. test1",
7212 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7213 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7214 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7215 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7216 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7217 BPF_FUNC_map_lookup_elem
),
7218 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7219 /* r2 = 0xffff'ffff'ffff'ffff */
7220 BPF_MOV64_IMM(BPF_REG_2
, 0xffffffff),
7221 /* r2 = 0xffff'ffff */
7222 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 32),
7223 /* r0 = <oob pointer> */
7224 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
7225 /* access to OOB pointer */
7226 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7228 BPF_MOV64_IMM(BPF_REG_0
, 0),
7231 .fixup_map1
= { 3 },
7232 .errstr
= "map_value pointer and 4294967295",
7236 "bounds check based on sign-extended MOV. test2",
7238 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7239 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7240 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7241 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7242 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7243 BPF_FUNC_map_lookup_elem
),
7244 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7245 /* r2 = 0xffff'ffff'ffff'ffff */
7246 BPF_MOV64_IMM(BPF_REG_2
, 0xffffffff),
7247 /* r2 = 0xfff'ffff */
7248 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 36),
7249 /* r0 = <oob pointer> */
7250 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
7251 /* access to OOB pointer */
7252 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7254 BPF_MOV64_IMM(BPF_REG_0
, 0),
7257 .fixup_map1
= { 3 },
7258 .errstr
= "R0 min value is outside of the array range",
7262 "bounds check based on reg_off + var_off + insn_off. test1",
7264 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
7265 offsetof(struct __sk_buff
, mark
)),
7266 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7267 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7268 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7269 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7270 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7271 BPF_FUNC_map_lookup_elem
),
7272 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7273 BPF_ALU64_IMM(BPF_AND
, BPF_REG_6
, 1),
7274 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, (1 << 29) - 1),
7275 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_6
),
7276 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, (1 << 29) - 1),
7277 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 3),
7278 BPF_MOV64_IMM(BPF_REG_0
, 0),
7281 .fixup_map1
= { 4 },
7282 .errstr
= "value_size=8 off=1073741825",
7284 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
7287 "bounds check based on reg_off + var_off + insn_off. test2",
7289 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
7290 offsetof(struct __sk_buff
, mark
)),
7291 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7292 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7293 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7294 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7295 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7296 BPF_FUNC_map_lookup_elem
),
7297 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7298 BPF_ALU64_IMM(BPF_AND
, BPF_REG_6
, 1),
7299 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, (1 << 30) - 1),
7300 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_6
),
7301 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, (1 << 29) - 1),
7302 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 3),
7303 BPF_MOV64_IMM(BPF_REG_0
, 0),
7306 .fixup_map1
= { 4 },
7307 .errstr
= "value 1073741823",
7309 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
7312 "bounds check after truncation of non-boundary-crossing range",
7314 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7315 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7316 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7317 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7318 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7319 BPF_FUNC_map_lookup_elem
),
7320 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7321 /* r1 = [0x00, 0xff] */
7322 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7323 BPF_MOV64_IMM(BPF_REG_2
, 1),
7324 /* r2 = 0x10'0000'0000 */
7325 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 36),
7326 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7327 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
7328 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7329 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7330 /* r1 = [0x00, 0xff] */
7331 BPF_ALU32_IMM(BPF_SUB
, BPF_REG_1
, 0x7fffffff),
7333 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7335 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7336 /* access at offset 0 */
7337 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7339 BPF_MOV64_IMM(BPF_REG_0
, 0),
7342 .fixup_map1
= { 3 },
7346 "bounds check after truncation of boundary-crossing range (1)",
7348 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7349 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7350 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7351 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7352 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7353 BPF_FUNC_map_lookup_elem
),
7354 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7355 /* r1 = [0x00, 0xff] */
7356 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7357 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7358 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7359 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7360 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7361 * [0x0000'0000, 0x0000'007f]
7363 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 0),
7364 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7365 /* r1 = [0x00, 0xff] or
7366 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7368 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7370 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7372 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7373 /* no-op or OOB pointer computation */
7374 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7375 /* potentially OOB access */
7376 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7378 BPF_MOV64_IMM(BPF_REG_0
, 0),
7381 .fixup_map1
= { 3 },
7382 /* not actually fully unbounded, but the bound is very high */
7383 .errstr
= "R0 unbounded memory access",
7387 "bounds check after truncation of boundary-crossing range (2)",
7389 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7390 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7391 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7392 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7393 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7394 BPF_FUNC_map_lookup_elem
),
7395 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7396 /* r1 = [0x00, 0xff] */
7397 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7398 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7399 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7400 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7401 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7402 * [0x0000'0000, 0x0000'007f]
7403 * difference to previous test: truncation via MOV32
7406 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_1
),
7407 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7408 /* r1 = [0x00, 0xff] or
7409 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7411 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7413 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7415 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7416 /* no-op or OOB pointer computation */
7417 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7418 /* potentially OOB access */
7419 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7421 BPF_MOV64_IMM(BPF_REG_0
, 0),
7424 .fixup_map1
= { 3 },
7425 /* not actually fully unbounded, but the bound is very high */
7426 .errstr
= "R0 unbounded memory access",
7430 "bounds check after wrapping 32-bit addition",
7432 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7433 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7434 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7435 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7436 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7437 BPF_FUNC_map_lookup_elem
),
7438 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
7439 /* r1 = 0x7fff'ffff */
7440 BPF_MOV64_IMM(BPF_REG_1
, 0x7fffffff),
7441 /* r1 = 0xffff'fffe */
7442 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7444 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 2),
7446 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7447 /* access at offset 0 */
7448 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7450 BPF_MOV64_IMM(BPF_REG_0
, 0),
7453 .fixup_map1
= { 3 },
7457 "bounds check after shift with oversized count operand",
7459 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7460 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7461 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7462 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7463 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7464 BPF_FUNC_map_lookup_elem
),
7465 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
7466 BPF_MOV64_IMM(BPF_REG_2
, 32),
7467 BPF_MOV64_IMM(BPF_REG_1
, 1),
7468 /* r1 = (u32)1 << (u32)32 = ? */
7469 BPF_ALU32_REG(BPF_LSH
, BPF_REG_1
, BPF_REG_2
),
7470 /* r1 = [0x0000, 0xffff] */
7471 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, 0xffff),
7472 /* computes unknown pointer, potentially OOB */
7473 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7474 /* potentially OOB access */
7475 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7477 BPF_MOV64_IMM(BPF_REG_0
, 0),
7480 .fixup_map1
= { 3 },
7481 .errstr
= "R0 max value is outside of the array range",
7485 "bounds check after right shift of maybe-negative number",
7487 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7488 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7489 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7490 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7491 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7492 BPF_FUNC_map_lookup_elem
),
7493 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
7494 /* r1 = [0x00, 0xff] */
7495 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7496 /* r1 = [-0x01, 0xfe] */
7497 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 1),
7498 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7499 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7500 /* r1 = 0 or 0xffff'ffff'ffff */
7501 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7502 /* computes unknown pointer, potentially OOB */
7503 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7504 /* potentially OOB access */
7505 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7507 BPF_MOV64_IMM(BPF_REG_0
, 0),
7510 .fixup_map1
= { 3 },
7511 .errstr
= "R0 unbounded memory access",
7515 "bounds check map access with off+size signed 32bit overflow. test1",
7517 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7518 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7519 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7520 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7521 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7522 BPF_FUNC_map_lookup_elem
),
7523 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7525 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7ffffffe),
7526 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7530 .fixup_map1
= { 3 },
7531 .errstr
= "map_value pointer and 2147483646",
7535 "bounds check map access with off+size signed 32bit overflow. test2",
7537 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7538 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7539 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7540 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7541 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7542 BPF_FUNC_map_lookup_elem
),
7543 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7545 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
7546 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
7547 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
7548 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7552 .fixup_map1
= { 3 },
7553 .errstr
= "pointer offset 1073741822",
7557 "bounds check map access with off+size signed 32bit overflow. test3",
7559 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7560 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7561 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7562 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7563 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7564 BPF_FUNC_map_lookup_elem
),
7565 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7567 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 0x1fffffff),
7568 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 0x1fffffff),
7569 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 2),
7573 .fixup_map1
= { 3 },
7574 .errstr
= "pointer offset -1073741822",
7578 "bounds check map access with off+size signed 32bit overflow. test4",
7580 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7581 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7582 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7583 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7584 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7585 BPF_FUNC_map_lookup_elem
),
7586 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7588 BPF_MOV64_IMM(BPF_REG_1
, 1000000),
7589 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 1000000),
7590 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7591 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 2),
7595 .fixup_map1
= { 3 },
7596 .errstr
= "map_value pointer and 1000000000000",
7600 "pointer/scalar confusion in state equality check (way 1)",
7602 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7603 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7604 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7605 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7606 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7607 BPF_FUNC_map_lookup_elem
),
7608 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
7609 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7611 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
7615 .fixup_map1
= { 3 },
7617 .retval
= POINTER_VALUE
,
7618 .result_unpriv
= REJECT
,
7619 .errstr_unpriv
= "R0 leaks addr as return value"
7622 "pointer/scalar confusion in state equality check (way 2)",
7624 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7625 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7626 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7627 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7628 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7629 BPF_FUNC_map_lookup_elem
),
7630 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
7631 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
7633 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7636 .fixup_map1
= { 3 },
7638 .retval
= POINTER_VALUE
,
7639 .result_unpriv
= REJECT
,
7640 .errstr_unpriv
= "R0 leaks addr as return value"
7643 "variable-offset ctx access",
7645 /* Get an unknown value */
7646 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7647 /* Make it small and 4-byte aligned */
7648 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7649 /* add it to skb. We now have either &skb->len or
7650 * &skb->pkt_type, but we don't know which
7652 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
7653 /* dereference it */
7654 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
7657 .errstr
= "variable ctx access var_off=(0x0; 0x4)",
7659 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7662 "variable-offset stack access",
7664 /* Fill the top 8 bytes of the stack */
7665 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7666 /* Get an unknown value */
7667 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7668 /* Make it small and 4-byte aligned */
7669 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7670 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
7671 /* add it to fp. We now have either fp-4 or fp-8, but
7672 * we don't know which
7674 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
7675 /* dereference it */
7676 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_2
, 0),
7679 .errstr
= "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7681 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7684 "indirect variable-offset stack access",
7686 /* Fill the top 8 bytes of the stack */
7687 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7688 /* Get an unknown value */
7689 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7690 /* Make it small and 4-byte aligned */
7691 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7692 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
7693 /* add it to fp. We now have either fp-4 or fp-8, but
7694 * we don't know which
7696 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
7697 /* dereference it indirectly */
7698 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7699 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7700 BPF_FUNC_map_lookup_elem
),
7701 BPF_MOV64_IMM(BPF_REG_0
, 0),
7704 .fixup_map1
= { 5 },
7705 .errstr
= "variable stack read R2",
7707 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7710 "direct stack access with 32-bit wraparound. test1",
7712 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7713 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7714 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7715 BPF_MOV32_IMM(BPF_REG_0
, 0),
7716 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7719 .errstr
= "fp pointer and 2147483647",
7723 "direct stack access with 32-bit wraparound. test2",
7725 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7726 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x3fffffff),
7727 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x3fffffff),
7728 BPF_MOV32_IMM(BPF_REG_0
, 0),
7729 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7732 .errstr
= "fp pointer and 1073741823",
7736 "direct stack access with 32-bit wraparound. test3",
7738 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7739 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x1fffffff),
7740 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x1fffffff),
7741 BPF_MOV32_IMM(BPF_REG_0
, 0),
7742 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7745 .errstr
= "fp pointer offset 1073741822",
7749 "liveness pruning and write screening",
7751 /* Get an unknown value */
7752 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7753 /* branch conditions teach us nothing about R2 */
7754 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
7755 BPF_MOV64_IMM(BPF_REG_0
, 0),
7756 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
7757 BPF_MOV64_IMM(BPF_REG_0
, 0),
7760 .errstr
= "R0 !read_ok",
7762 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7765 "varlen_map_value_access pruning",
7767 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7768 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7769 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7770 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7771 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7772 BPF_FUNC_map_lookup_elem
),
7773 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
7774 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
7775 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
7776 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
7777 BPF_MOV32_IMM(BPF_REG_1
, 0),
7778 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
7779 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7780 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
7781 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
7782 offsetof(struct test_val
, foo
)),
7785 .fixup_map2
= { 3 },
7786 .errstr_unpriv
= "R0 leaks addr",
7787 .errstr
= "R0 unbounded memory access",
7788 .result_unpriv
= REJECT
,
7790 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7793 "invalid 64-bit BPF_END",
7795 BPF_MOV32_IMM(BPF_REG_0
, 0),
7797 .code
= BPF_ALU64
| BPF_END
| BPF_TO_LE
,
7798 .dst_reg
= BPF_REG_0
,
7805 .errstr
= "unknown opcode d7",
7809 "XDP, using ifindex from netdev",
7811 BPF_MOV64_IMM(BPF_REG_0
, 0),
7812 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7813 offsetof(struct xdp_md
, ingress_ifindex
)),
7814 BPF_JMP_IMM(BPF_JLT
, BPF_REG_2
, 1, 1),
7815 BPF_MOV64_IMM(BPF_REG_0
, 1),
7819 .prog_type
= BPF_PROG_TYPE_XDP
,
7823 "meta access, test1",
7825 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7826 offsetof(struct xdp_md
, data_meta
)),
7827 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7828 offsetof(struct xdp_md
, data
)),
7829 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
7830 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7831 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
7832 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7833 BPF_MOV64_IMM(BPF_REG_0
, 0),
7837 .prog_type
= BPF_PROG_TYPE_XDP
,
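/* The metadata area sits in front of the packet, so a meta pointer access is
 * only valid after proving data_meta + len <= data, mirroring the usual
 * data/data_end check for packet accesses; the remaining "meta access" tests
 * vary this comparison to probe both valid and invalid orderings.
 */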
7840 "meta access, test2",
7842 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7843 offsetof(struct xdp_md
, data_meta
)),
7844 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7845 offsetof(struct xdp_md
, data
)),
7846 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
7847 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 8),
7848 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7849 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
7850 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7851 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7852 BPF_MOV64_IMM(BPF_REG_0
, 0),
7856 .errstr
= "invalid access to packet, off=-8",
7857 .prog_type
= BPF_PROG_TYPE_XDP
,
7860 "meta access, test3",
7862 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7863 offsetof(struct xdp_md
, data_meta
)),
7864 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7865 offsetof(struct xdp_md
, data_end
)),
7866 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
7867 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7868 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
7869 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7870 BPF_MOV64_IMM(BPF_REG_0
, 0),
7874 .errstr
= "invalid access to packet",
7875 .prog_type
= BPF_PROG_TYPE_XDP
,
7878 "meta access, test4",
7880 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7881 offsetof(struct xdp_md
, data_meta
)),
7882 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7883 offsetof(struct xdp_md
, data_end
)),
7884 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
7885 offsetof(struct xdp_md
, data
)),
7886 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
7887 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7888 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
7889 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7890 BPF_MOV64_IMM(BPF_REG_0
, 0),
7894 .errstr
= "invalid access to packet",
7895 .prog_type
= BPF_PROG_TYPE_XDP
,
7898 "meta access, test5",
7900 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7901 offsetof(struct xdp_md
, data_meta
)),
7902 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
7903 offsetof(struct xdp_md
, data
)),
7904 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
7905 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7906 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_4
, 3),
7907 BPF_MOV64_IMM(BPF_REG_2
, -8),
7908 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7909 BPF_FUNC_xdp_adjust_meta
),
7910 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 0),
7911 BPF_MOV64_IMM(BPF_REG_0
, 0),
7915 .errstr
= "R3 !read_ok",
7916 .prog_type
= BPF_PROG_TYPE_XDP
,
7919 "meta access, test6",
7921 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7922 offsetof(struct xdp_md
, data_meta
)),
7923 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7924 offsetof(struct xdp_md
, data
)),
7925 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
7926 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7927 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7928 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
7929 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_0
, 1),
7930 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7931 BPF_MOV64_IMM(BPF_REG_0
, 0),
7935 .errstr
= "invalid access to packet",
7936 .prog_type
= BPF_PROG_TYPE_XDP
,
7939 "meta access, test7",
7941 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7942 offsetof(struct xdp_md
, data_meta
)),
7943 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7944 offsetof(struct xdp_md
, data
)),
7945 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
7946 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7947 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7948 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
7949 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7950 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7951 BPF_MOV64_IMM(BPF_REG_0
, 0),
7955 .prog_type
= BPF_PROG_TYPE_XDP
,
7958 "meta access, test8",
7960 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7961 offsetof(struct xdp_md
, data_meta
)),
7962 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7963 offsetof(struct xdp_md
, data
)),
7964 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7965 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0xFFFF),
7966 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7967 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7968 BPF_MOV64_IMM(BPF_REG_0
, 0),
7972 .prog_type
= BPF_PROG_TYPE_XDP
,
7975 "meta access, test9",
7977 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7978 offsetof(struct xdp_md
, data_meta
)),
7979 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7980 offsetof(struct xdp_md
, data
)),
7981 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7982 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0xFFFF),
7983 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 1),
7984 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7985 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7986 BPF_MOV64_IMM(BPF_REG_0
, 0),
7990 .errstr
= "invalid access to packet",
7991 .prog_type
= BPF_PROG_TYPE_XDP
,
7994 "meta access, test10",
7996 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7997 offsetof(struct xdp_md
, data_meta
)),
7998 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7999 offsetof(struct xdp_md
, data
)),
8000 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
8001 offsetof(struct xdp_md
, data_end
)),
8002 BPF_MOV64_IMM(BPF_REG_5
, 42),
8003 BPF_MOV64_IMM(BPF_REG_6
, 24),
8004 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_5
, -8),
8005 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
8006 BPF_LDX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_10
, -8),
8007 BPF_JMP_IMM(BPF_JGT
, BPF_REG_5
, 100, 6),
8008 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_5
),
8009 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
8010 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
8011 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
8012 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_5
, 1),
8013 BPF_LDX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
8014 BPF_MOV64_IMM(BPF_REG_0
, 0),
8018 .errstr
= "invalid access to packet",
8019 .prog_type
= BPF_PROG_TYPE_XDP
,
8022 "meta access, test11",
8024 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8025 offsetof(struct xdp_md
, data_meta
)),
8026 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8027 offsetof(struct xdp_md
, data
)),
8028 BPF_MOV64_IMM(BPF_REG_5
, 42),
8029 BPF_MOV64_IMM(BPF_REG_6
, 24),
8030 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_5
, -8),
8031 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
8032 BPF_LDX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_10
, -8),
8033 BPF_JMP_IMM(BPF_JGT
, BPF_REG_5
, 100, 6),
8034 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_5
),
8035 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_2
),
8036 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
8037 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
8038 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_3
, 1),
8039 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_5
, 0),
8040 BPF_MOV64_IMM(BPF_REG_0
, 0),
8044 .prog_type
= BPF_PROG_TYPE_XDP
,
8047 "meta access, test12",
8049 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8050 offsetof(struct xdp_md
, data_meta
)),
8051 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8052 offsetof(struct xdp_md
, data
)),
8053 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
8054 offsetof(struct xdp_md
, data_end
)),
8055 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
8056 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 16),
8057 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_4
, 5),
8058 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 0),
8059 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_2
),
8060 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 16),
8061 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 1),
8062 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
8063 BPF_MOV64_IMM(BPF_REG_0
, 0),
8067 .prog_type
= BPF_PROG_TYPE_XDP
,
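/* For reference, the bounds check exercised by the "meta access" tests
 * above corresponds roughly to the following restricted C. This is a
 * sketch only; the section and function names are illustrative and not
 * taken from this file. A load through data_meta is only accepted after
 * proving that it cannot run past ctx->data:
 *
 *	SEC("xdp")
 *	int xdp_meta_sketch(struct xdp_md *ctx)
 *	{
 *		void *data_meta = (void *)(long)ctx->data_meta;
 *		void *data = (void *)(long)ctx->data;
 *
 *		if (data_meta + 8 > data)
 *			return XDP_PASS;
 *		return ((__u8 *)data_meta)[0] ? XDP_DROP : XDP_PASS;
 *	}
 *
 * The rejected variants above either compare against data_end instead of
 * data, move the meta pointer backwards, or use an unchecked copy.
 */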
8070 "arithmetic ops make PTR_TO_CTX unusable",
8072 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
8073 offsetof(struct __sk_buff
, data
) -
8074 offsetof(struct __sk_buff
, mark
)),
8075 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
8076 offsetof(struct __sk_buff
, mark
)),
8079 .errstr
= "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
8081 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
8084 "pkt_end - pkt_start is allowed",
8086 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
8087 offsetof(struct __sk_buff
, data_end
)),
8088 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8089 offsetof(struct __sk_buff
, data
)),
8090 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_2
),
8094 .retval
= TEST_DATA_LEN
,
8095 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
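/* The "pkt_end - pkt_start is allowed" test above mirrors a common way of
 * computing the linear packet length in a cls_bpf program. A minimal C
 * sketch, with illustrative names that are not part of this file:
 *
 *	SEC("classifier")
 *	int cls_len_sketch(struct __sk_buff *skb)
 *	{
 *		long data = (long)skb->data;
 *		long data_end = (long)skb->data_end;
 *
 *		return data_end - data;
 *	}
 *
 * Subtracting two packet pointers yields a plain scalar, so it may be
 * returned; the test expects that scalar to equal TEST_DATA_LEN.
 */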
8098 "XDP pkt read, pkt_end mangling, bad access 1",
8100 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8101 offsetof(struct xdp_md
, data
)),
8102 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8103 offsetof(struct xdp_md
, data_end
)),
8104 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8105 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8106 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 8),
8107 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8108 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8109 BPF_MOV64_IMM(BPF_REG_0
, 0),
8112 .errstr
= "R3 pointer arithmetic on PTR_TO_PACKET_END",
8114 .prog_type
= BPF_PROG_TYPE_XDP
,
8117 "XDP pkt read, pkt_end mangling, bad access 2",
8119 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8120 offsetof(struct xdp_md
, data
)),
8121 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8122 offsetof(struct xdp_md
, data_end
)),
8123 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8124 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8125 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_3
, 8),
8126 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8127 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8128 BPF_MOV64_IMM(BPF_REG_0
, 0),
8131 .errstr
= "R3 pointer arithmetic on PTR_TO_PACKET_END",
8133 .prog_type
= BPF_PROG_TYPE_XDP
,
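/* The two "pkt_end mangling" tests above reject programs that move
 * data_end before the comparison. In C terms the accepted pattern, which
 * the long series of comparison tests that follows keeps re-checking
 * with every JGT/JGE/JLT/JLE variant, advances a copy of the data
 * pointer and leaves data_end untouched (sketch only, identifiers are
 * illustrative):
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *
 *	if (data + 8 > data_end)
 *		return XDP_DROP;
 *
 * Anything equivalent to "data_end += 8" is refused with
 * "R3 pointer arithmetic on PTR_TO_PACKET_END".
 */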
8136 "XDP pkt read, pkt_data' > pkt_end, good access",
8138 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8139 offsetof(struct xdp_md
, data
)),
8140 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8141 offsetof(struct xdp_md
, data_end
)),
8142 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8143 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8144 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8145 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8146 BPF_MOV64_IMM(BPF_REG_0
, 0),
8150 .prog_type
= BPF_PROG_TYPE_XDP
,
8153 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8155 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8156 offsetof(struct xdp_md
, data
)),
8157 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8158 offsetof(struct xdp_md
, data_end
)),
8159 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8160 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8161 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8162 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8163 BPF_MOV64_IMM(BPF_REG_0
, 0),
8166 .errstr
= "R1 offset is outside of the packet",
8168 .prog_type
= BPF_PROG_TYPE_XDP
,
8169 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8172 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8174 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8175 offsetof(struct xdp_md
, data
)),
8176 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8177 offsetof(struct xdp_md
, data_end
)),
8178 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8179 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8180 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 0),
8181 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8182 BPF_MOV64_IMM(BPF_REG_0
, 0),
8185 .errstr
= "R1 offset is outside of the packet",
8187 .prog_type
= BPF_PROG_TYPE_XDP
,
8190 "XDP pkt read, pkt_end > pkt_data', good access",
8192 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8193 offsetof(struct xdp_md
, data
)),
8194 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8195 offsetof(struct xdp_md
, data_end
)),
8196 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8197 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8198 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8199 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8200 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8201 BPF_MOV64_IMM(BPF_REG_0
, 0),
8205 .prog_type
= BPF_PROG_TYPE_XDP
,
8206 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8209 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8211 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8212 offsetof(struct xdp_md
, data
)),
8213 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8214 offsetof(struct xdp_md
, data_end
)),
8215 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8216 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8217 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8218 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8219 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8220 BPF_MOV64_IMM(BPF_REG_0
, 0),
8223 .errstr
= "R1 offset is outside of the packet",
8225 .prog_type
= BPF_PROG_TYPE_XDP
,
8228 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8230 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8231 offsetof(struct xdp_md
, data
)),
8232 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8233 offsetof(struct xdp_md
, data_end
)),
8234 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8235 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8236 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8237 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8238 BPF_MOV64_IMM(BPF_REG_0
, 0),
8241 .errstr
= "R1 offset is outside of the packet",
8243 .prog_type
= BPF_PROG_TYPE_XDP
,
8246 "XDP pkt read, pkt_data' < pkt_end, good access",
8248 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8249 offsetof(struct xdp_md
, data
)),
8250 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8251 offsetof(struct xdp_md
, data_end
)),
8252 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8253 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8254 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8255 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8256 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8257 BPF_MOV64_IMM(BPF_REG_0
, 0),
8261 .prog_type
= BPF_PROG_TYPE_XDP
,
8262 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8265 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8267 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8268 offsetof(struct xdp_md
, data
)),
8269 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8270 offsetof(struct xdp_md
, data_end
)),
8271 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8272 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8273 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8274 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8275 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8276 BPF_MOV64_IMM(BPF_REG_0
, 0),
8279 .errstr
= "R1 offset is outside of the packet",
8281 .prog_type
= BPF_PROG_TYPE_XDP
,
8284 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8286 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8287 offsetof(struct xdp_md
, data
)),
8288 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8289 offsetof(struct xdp_md
, data_end
)),
8290 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8291 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8292 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8293 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8294 BPF_MOV64_IMM(BPF_REG_0
, 0),
8297 .errstr
= "R1 offset is outside of the packet",
8299 .prog_type
= BPF_PROG_TYPE_XDP
,
8302 "XDP pkt read, pkt_end < pkt_data', good access",
8304 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8305 offsetof(struct xdp_md
, data
)),
8306 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8307 offsetof(struct xdp_md
, data_end
)),
8308 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8309 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8310 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8311 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8312 BPF_MOV64_IMM(BPF_REG_0
, 0),
8316 .prog_type
= BPF_PROG_TYPE_XDP
,
8319 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8321 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8322 offsetof(struct xdp_md
, data
)),
8323 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8324 offsetof(struct xdp_md
, data_end
)),
8325 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8326 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8327 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8328 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8329 BPF_MOV64_IMM(BPF_REG_0
, 0),
8332 .errstr
= "R1 offset is outside of the packet",
8334 .prog_type
= BPF_PROG_TYPE_XDP
,
8335 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8338 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8340 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8341 offsetof(struct xdp_md
, data
)),
8342 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8343 offsetof(struct xdp_md
, data_end
)),
8344 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8345 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8346 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 0),
8347 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8348 BPF_MOV64_IMM(BPF_REG_0
, 0),
8351 .errstr
= "R1 offset is outside of the packet",
8353 .prog_type
= BPF_PROG_TYPE_XDP
,
8356 "XDP pkt read, pkt_data' >= pkt_end, good access",
8358 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8359 offsetof(struct xdp_md
, data
)),
8360 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8361 offsetof(struct xdp_md
, data_end
)),
8362 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8363 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8364 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8365 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8366 BPF_MOV64_IMM(BPF_REG_0
, 0),
8370 .prog_type
= BPF_PROG_TYPE_XDP
,
8371 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8374 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8376 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8377 offsetof(struct xdp_md
, data
)),
8378 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8379 offsetof(struct xdp_md
, data_end
)),
8380 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8381 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8382 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8383 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8384 BPF_MOV64_IMM(BPF_REG_0
, 0),
8387 .errstr
= "R1 offset is outside of the packet",
8389 .prog_type
= BPF_PROG_TYPE_XDP
,
8392 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8394 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8395 offsetof(struct xdp_md
, data
)),
8396 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8397 offsetof(struct xdp_md
, data_end
)),
8398 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8399 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8400 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 0),
8401 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8402 BPF_MOV64_IMM(BPF_REG_0
, 0),
8405 .errstr
= "R1 offset is outside of the packet",
8407 .prog_type
= BPF_PROG_TYPE_XDP
,
8408 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8411 "XDP pkt read, pkt_end >= pkt_data', good access",
8413 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8414 offsetof(struct xdp_md
, data
)),
8415 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8416 offsetof(struct xdp_md
, data_end
)),
8417 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8418 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8419 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8420 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8421 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8422 BPF_MOV64_IMM(BPF_REG_0
, 0),
8426 .prog_type
= BPF_PROG_TYPE_XDP
,
8429 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8431 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8432 offsetof(struct xdp_md
, data
)),
8433 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8434 offsetof(struct xdp_md
, data_end
)),
8435 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8436 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8437 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8438 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8439 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8440 BPF_MOV64_IMM(BPF_REG_0
, 0),
8443 .errstr
= "R1 offset is outside of the packet",
8445 .prog_type
= BPF_PROG_TYPE_XDP
,
8446 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8449 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8451 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8452 offsetof(struct xdp_md
, data
)),
8453 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8454 offsetof(struct xdp_md
, data_end
)),
8455 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8456 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8457 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8458 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8459 BPF_MOV64_IMM(BPF_REG_0
, 0),
8462 .errstr
= "R1 offset is outside of the packet",
8464 .prog_type
= BPF_PROG_TYPE_XDP
,
8467 "XDP pkt read, pkt_data' <= pkt_end, good access",
8469 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8470 offsetof(struct xdp_md
, data
)),
8471 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8472 offsetof(struct xdp_md
, data_end
)),
8473 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8474 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8475 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8476 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8477 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8478 BPF_MOV64_IMM(BPF_REG_0
, 0),
8482 .prog_type
= BPF_PROG_TYPE_XDP
,
8485 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8487 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8488 offsetof(struct xdp_md
, data
)),
8489 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8490 offsetof(struct xdp_md
, data_end
)),
8491 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8492 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8493 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8494 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8495 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8496 BPF_MOV64_IMM(BPF_REG_0
, 0),
8499 .errstr
= "R1 offset is outside of the packet",
8501 .prog_type
= BPF_PROG_TYPE_XDP
,
8502 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8505 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8507 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8508 offsetof(struct xdp_md
, data
)),
8509 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8510 offsetof(struct xdp_md
, data_end
)),
8511 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8512 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8513 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8514 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8515 BPF_MOV64_IMM(BPF_REG_0
, 0),
8518 .errstr
= "R1 offset is outside of the packet",
8520 .prog_type
= BPF_PROG_TYPE_XDP
,
8523 "XDP pkt read, pkt_end <= pkt_data', good access",
8525 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8526 offsetof(struct xdp_md
, data
)),
8527 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8528 offsetof(struct xdp_md
, data_end
)),
8529 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8530 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8531 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8532 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8533 BPF_MOV64_IMM(BPF_REG_0
, 0),
8537 .prog_type
= BPF_PROG_TYPE_XDP
,
8538 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8541 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8543 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8544 offsetof(struct xdp_md
, data
)),
8545 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8546 offsetof(struct xdp_md
, data_end
)),
8547 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8548 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8549 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8550 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8551 BPF_MOV64_IMM(BPF_REG_0
, 0),
8554 .errstr
= "R1 offset is outside of the packet",
8556 .prog_type
= BPF_PROG_TYPE_XDP
,
8559 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8561 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8562 offsetof(struct xdp_md
, data
)),
8563 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8564 offsetof(struct xdp_md
, data_end
)),
8565 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8566 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8567 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 0),
8568 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8569 BPF_MOV64_IMM(BPF_REG_0
, 0),
8572 .errstr
= "R1 offset is outside of the packet",
8574 .prog_type
= BPF_PROG_TYPE_XDP
,
8575 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8578 "XDP pkt read, pkt_meta' > pkt_data, good access",
8580 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8581 offsetof(struct xdp_md
, data_meta
)),
8582 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8583 offsetof(struct xdp_md
, data
)),
8584 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8585 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8586 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8587 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8588 BPF_MOV64_IMM(BPF_REG_0
, 0),
8592 .prog_type
= BPF_PROG_TYPE_XDP
,
8595 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8597 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8598 offsetof(struct xdp_md
, data_meta
)),
8599 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8600 offsetof(struct xdp_md
, data
)),
8601 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8602 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8603 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8604 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8605 BPF_MOV64_IMM(BPF_REG_0
, 0),
8608 .errstr
= "R1 offset is outside of the packet",
8610 .prog_type
= BPF_PROG_TYPE_XDP
,
8611 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8614 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8616 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8617 offsetof(struct xdp_md
, data_meta
)),
8618 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8619 offsetof(struct xdp_md
, data
)),
8620 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8621 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8622 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 0),
8623 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8624 BPF_MOV64_IMM(BPF_REG_0
, 0),
8627 .errstr
= "R1 offset is outside of the packet",
8629 .prog_type
= BPF_PROG_TYPE_XDP
,
8632 "XDP pkt read, pkt_data > pkt_meta', good access",
8634 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8635 offsetof(struct xdp_md
, data_meta
)),
8636 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8637 offsetof(struct xdp_md
, data
)),
8638 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8639 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8640 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8641 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8642 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8643 BPF_MOV64_IMM(BPF_REG_0
, 0),
8647 .prog_type
= BPF_PROG_TYPE_XDP
,
8648 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8651 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8653 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8654 offsetof(struct xdp_md
, data_meta
)),
8655 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8656 offsetof(struct xdp_md
, data
)),
8657 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8658 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8659 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8660 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8661 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8662 BPF_MOV64_IMM(BPF_REG_0
, 0),
8665 .errstr
= "R1 offset is outside of the packet",
8667 .prog_type
= BPF_PROG_TYPE_XDP
,
8670 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8672 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8673 offsetof(struct xdp_md
, data_meta
)),
8674 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8675 offsetof(struct xdp_md
, data
)),
8676 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8677 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8678 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8679 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8680 BPF_MOV64_IMM(BPF_REG_0
, 0),
8683 .errstr
= "R1 offset is outside of the packet",
8685 .prog_type
= BPF_PROG_TYPE_XDP
,
8688 "XDP pkt read, pkt_meta' < pkt_data, good access",
8690 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8691 offsetof(struct xdp_md
, data_meta
)),
8692 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8693 offsetof(struct xdp_md
, data
)),
8694 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8695 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8696 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8697 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8698 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8699 BPF_MOV64_IMM(BPF_REG_0
, 0),
8703 .prog_type
= BPF_PROG_TYPE_XDP
,
8704 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8707 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8709 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8710 offsetof(struct xdp_md
, data_meta
)),
8711 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8712 offsetof(struct xdp_md
, data
)),
8713 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8714 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8715 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8716 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8717 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8718 BPF_MOV64_IMM(BPF_REG_0
, 0),
8721 .errstr
= "R1 offset is outside of the packet",
8723 .prog_type
= BPF_PROG_TYPE_XDP
,
8726 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8728 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8729 offsetof(struct xdp_md
, data_meta
)),
8730 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8731 offsetof(struct xdp_md
, data
)),
8732 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8733 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8734 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8735 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8736 BPF_MOV64_IMM(BPF_REG_0
, 0),
8739 .errstr
= "R1 offset is outside of the packet",
8741 .prog_type
= BPF_PROG_TYPE_XDP
,
8744 "XDP pkt read, pkt_data < pkt_meta', good access",
8746 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8747 offsetof(struct xdp_md
, data_meta
)),
8748 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8749 offsetof(struct xdp_md
, data
)),
8750 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8751 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8752 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8753 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8754 BPF_MOV64_IMM(BPF_REG_0
, 0),
8758 .prog_type
= BPF_PROG_TYPE_XDP
,
8761 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8763 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8764 offsetof(struct xdp_md
, data_meta
)),
8765 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8766 offsetof(struct xdp_md
, data
)),
8767 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8768 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8769 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8770 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8771 BPF_MOV64_IMM(BPF_REG_0
, 0),
8774 .errstr
= "R1 offset is outside of the packet",
8776 .prog_type
= BPF_PROG_TYPE_XDP
,
8777 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8780 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8782 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8783 offsetof(struct xdp_md
, data_meta
)),
8784 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8785 offsetof(struct xdp_md
, data
)),
8786 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8787 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8788 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 0),
8789 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8790 BPF_MOV64_IMM(BPF_REG_0
, 0),
8793 .errstr
= "R1 offset is outside of the packet",
8795 .prog_type
= BPF_PROG_TYPE_XDP
,
8798 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8800 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8801 offsetof(struct xdp_md
, data_meta
)),
8802 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8803 offsetof(struct xdp_md
, data
)),
8804 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8805 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8806 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8807 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8808 BPF_MOV64_IMM(BPF_REG_0
, 0),
8812 .prog_type
= BPF_PROG_TYPE_XDP
,
8813 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8816 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8818 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8819 offsetof(struct xdp_md
, data_meta
)),
8820 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8821 offsetof(struct xdp_md
, data
)),
8822 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8823 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8824 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8825 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8826 BPF_MOV64_IMM(BPF_REG_0
, 0),
8829 .errstr
= "R1 offset is outside of the packet",
8831 .prog_type
= BPF_PROG_TYPE_XDP
,
8834 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8836 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8837 offsetof(struct xdp_md
, data_meta
)),
8838 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8839 offsetof(struct xdp_md
, data
)),
8840 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8841 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8842 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 0),
8843 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8844 BPF_MOV64_IMM(BPF_REG_0
, 0),
8847 .errstr
= "R1 offset is outside of the packet",
8849 .prog_type
= BPF_PROG_TYPE_XDP
,
8850 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8853 "XDP pkt read, pkt_data >= pkt_meta', good access",
8855 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8856 offsetof(struct xdp_md
, data_meta
)),
8857 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8858 offsetof(struct xdp_md
, data
)),
8859 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8860 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8861 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8862 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8863 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8864 BPF_MOV64_IMM(BPF_REG_0
, 0),
8868 .prog_type
= BPF_PROG_TYPE_XDP
,
8871 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8873 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8874 offsetof(struct xdp_md
, data_meta
)),
8875 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8876 offsetof(struct xdp_md
, data
)),
8877 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8878 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8879 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8880 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8881 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8882 BPF_MOV64_IMM(BPF_REG_0
, 0),
8885 .errstr
= "R1 offset is outside of the packet",
8887 .prog_type
= BPF_PROG_TYPE_XDP
,
8888 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8891 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8893 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8894 offsetof(struct xdp_md
, data_meta
)),
8895 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8896 offsetof(struct xdp_md
, data
)),
8897 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8898 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8899 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8900 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8901 BPF_MOV64_IMM(BPF_REG_0
, 0),
8904 .errstr
= "R1 offset is outside of the packet",
8906 .prog_type
= BPF_PROG_TYPE_XDP
,
8909 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8911 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8912 offsetof(struct xdp_md
, data_meta
)),
8913 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8914 offsetof(struct xdp_md
, data
)),
8915 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8916 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8917 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8918 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8919 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8920 BPF_MOV64_IMM(BPF_REG_0
, 0),
8924 .prog_type
= BPF_PROG_TYPE_XDP
,
8927 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8929 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8930 offsetof(struct xdp_md
, data_meta
)),
8931 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8932 offsetof(struct xdp_md
, data
)),
8933 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8934 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8935 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8936 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8937 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8938 BPF_MOV64_IMM(BPF_REG_0
, 0),
8941 .errstr
= "R1 offset is outside of the packet",
8943 .prog_type
= BPF_PROG_TYPE_XDP
,
8944 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8947 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8949 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8950 offsetof(struct xdp_md
, data_meta
)),
8951 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8952 offsetof(struct xdp_md
, data
)),
8953 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8954 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8955 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8956 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8957 BPF_MOV64_IMM(BPF_REG_0
, 0),
8960 .errstr
= "R1 offset is outside of the packet",
8962 .prog_type
= BPF_PROG_TYPE_XDP
,
8965 "XDP pkt read, pkt_data <= pkt_meta', good access",
8967 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8968 offsetof(struct xdp_md
, data_meta
)),
8969 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8970 offsetof(struct xdp_md
, data
)),
8971 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8972 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8973 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8974 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8975 BPF_MOV64_IMM(BPF_REG_0
, 0),
8979 .prog_type
= BPF_PROG_TYPE_XDP
,
8980 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8983 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8985 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8986 offsetof(struct xdp_md
, data_meta
)),
8987 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8988 offsetof(struct xdp_md
, data
)),
8989 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8990 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8991 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8992 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8993 BPF_MOV64_IMM(BPF_REG_0
, 0),
8996 .errstr
= "R1 offset is outside of the packet",
8998 .prog_type
= BPF_PROG_TYPE_XDP
,
9001 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9003 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
9004 offsetof(struct xdp_md
, data_meta
)),
9005 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
9006 offsetof(struct xdp_md
, data
)),
9007 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
9008 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
9009 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 0),
9010 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
9011 BPF_MOV64_IMM(BPF_REG_0
, 0),
9014 .errstr
= "R1 offset is outside of the packet",
9016 .prog_type
= BPF_PROG_TYPE_XDP
,
9017 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
9020 "check deducing bounds from const, 1",
9022 BPF_MOV64_IMM(BPF_REG_0
, 1),
9023 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 1, 0),
9024 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9028 .errstr
= "R0 tried to subtract pointer from scalar",
9031 "check deducing bounds from const, 2",
9033 BPF_MOV64_IMM(BPF_REG_0
, 1),
9034 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 1, 1),
9036 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 1, 1),
9038 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_0
),
9045 "check deducing bounds from const, 3",
9047 BPF_MOV64_IMM(BPF_REG_0
, 0),
9048 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 0, 0),
9049 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9053 .errstr
= "R0 tried to subtract pointer from scalar",
9056 "check deducing bounds from const, 4",
9058 BPF_MOV64_IMM(BPF_REG_0
, 0),
9059 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 0, 1),
9061 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9063 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_0
),
9069 "check deducing bounds from const, 5",
9071 BPF_MOV64_IMM(BPF_REG_0
, 0),
9072 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9073 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9077 .errstr
= "R0 tried to subtract pointer from scalar",
9080 "check deducing bounds from const, 6",
9082 BPF_MOV64_IMM(BPF_REG_0
, 0),
9083 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9085 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9089 .errstr
= "R0 tried to subtract pointer from scalar",
9092 "check deducing bounds from const, 7",
9094 BPF_MOV64_IMM(BPF_REG_0
, ~0),
9095 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 0),
9096 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_0
),
9097 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9098 offsetof(struct __sk_buff
, mark
)),
9102 .errstr
= "dereference of modified ctx ptr",
9105 "check deducing bounds from const, 8",
9107 BPF_MOV64_IMM(BPF_REG_0
, ~0),
9108 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9109 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_0
),
9110 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9111 offsetof(struct __sk_buff
, mark
)),
9115 .errstr
= "dereference of modified ctx ptr",
9118 "check deducing bounds from const, 9",
9120 BPF_MOV64_IMM(BPF_REG_0
, 0),
9121 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 0),
9122 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9126 .errstr
= "R0 tried to subtract pointer from scalar",
9129 "check deducing bounds from const, 10",
9131 BPF_MOV64_IMM(BPF_REG_0
, 0),
9132 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 0, 0),
9133 /* Marks reg as unknown. */
9134 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_0
, 0),
9135 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9139 .errstr
= "math between ctx pointer and register with unbounded min value is not allowed",
9142 "bpf_exit with invalid return code. test1",
9144 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9147 .errstr
= "R0 has value (0x0; 0xffffffff)",
9149 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9152 "bpf_exit with invalid return code. test2",
9154 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9155 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 1),
9159 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9162 "bpf_exit with invalid return code. test3",
9164 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9165 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 3),
9168 .errstr
= "R0 has value (0x0; 0x3)",
9170 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9173 "bpf_exit with invalid return code. test4",
9175 BPF_MOV64_IMM(BPF_REG_0
, 1),
9179 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9182 "bpf_exit with invalid return code. test5",
9184 BPF_MOV64_IMM(BPF_REG_0
, 2),
9187 .errstr
= "R0 has value (0x2; 0x0)",
9189 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9192 "bpf_exit with invalid return code. test6",
9194 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9197 .errstr
= "R0 is not a known value (ctx)",
9199 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9202 "bpf_exit with invalid return code. test7",
9204 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9205 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 4),
9206 BPF_ALU64_REG(BPF_MUL
, BPF_REG_0
, BPF_REG_2
),
9209 .errstr
= "R0 has unknown scalar value",
9211 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
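/* The "bpf_exit with invalid return code" tests above reflect the rule
 * that a BPF_PROG_TYPE_CGROUP_SOCK program must return exactly 0 or 1.
 * A minimal C sketch, with illustrative names not taken from this file:
 *
 *	SEC("cgroup/sock")
 *	int sock_create_sketch(struct bpf_sock *sk)
 *	{
 *		if (sk->family != AF_INET6)
 *			return 0;
 *		return 1;
 *	}
 *
 * Returning anything the verifier cannot prove to be 0 or 1, such as a
 * value masked with 3 or a raw context load, is rejected at load time.
 */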
9214 "calls: basic sanity",
9216 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
9217 BPF_MOV64_IMM(BPF_REG_0
, 1),
9219 BPF_MOV64_IMM(BPF_REG_0
, 2),
9222 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9226 "calls: not on unpriviledged",
9228 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
9229 BPF_MOV64_IMM(BPF_REG_0
, 1),
9231 BPF_MOV64_IMM(BPF_REG_0
, 2),
9234 .errstr_unpriv
= "function calls to other bpf functions are allowed for root only",
9235 .result_unpriv
= REJECT
,
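/* The "calls: ..." tests exercise BPF-to-BPF function calls, encoded in
 * these arrays by BPF_RAW_INSN(BPF_JMP | BPF_CALL, ...) with the src_reg
 * operand set to 1 (BPF_PSEUDO_CALL). In C such a call is simply a
 * non-inlined function; a hedged sketch with illustrative names:
 *
 *	static __attribute__((noinline)) int subprog(int x)
 *	{
 *		return x + 1;
 *	}
 *
 *	SEC("tracepoint")
 *	int caller_sketch(void *ctx)
 *	{
 *		return subprog(1);
 *	}
 *
 * As the test above notes, an unprivileged user cannot load a program
 * containing such calls ("function calls to other bpf functions are
 * allowed for root only").
 */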
9240 "calls: div by 0 in subprog",
9242 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9243 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 8),
9244 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9245 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
9246 offsetof(struct __sk_buff
, data_end
)),
9247 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
9248 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
9249 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
9250 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
9251 BPF_MOV64_IMM(BPF_REG_0
, 1),
9253 BPF_MOV32_IMM(BPF_REG_2
, 0),
9254 BPF_MOV32_IMM(BPF_REG_3
, 1),
9255 BPF_ALU32_REG(BPF_DIV
, BPF_REG_3
, BPF_REG_2
),
9256 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9257 offsetof(struct __sk_buff
, data
)),
9260 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9265 "calls: multiple ret types in subprog 1",
9267 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9268 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 8),
9269 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9270 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
9271 offsetof(struct __sk_buff
, data_end
)),
9272 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
9273 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
9274 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
9275 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
9276 BPF_MOV64_IMM(BPF_REG_0
, 1),
9278 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9279 offsetof(struct __sk_buff
, data
)),
9280 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
9281 BPF_MOV32_IMM(BPF_REG_0
, 42),
9284 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9286 .errstr
= "R0 invalid mem access 'inv'",
9289 "calls: multiple ret types in subprog 2",
9291 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9292 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 8),
9293 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9294 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
9295 offsetof(struct __sk_buff
, data_end
)),
9296 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
9297 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
9298 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
9299 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
9300 BPF_MOV64_IMM(BPF_REG_0
, 1),
9302 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9303 offsetof(struct __sk_buff
, data
)),
9304 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9305 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 9),
9306 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
9307 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
9308 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
9309 BPF_LD_MAP_FD(BPF_REG_1
, 0),
9310 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9311 BPF_FUNC_map_lookup_elem
),
9312 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
9313 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_6
,
9314 offsetof(struct __sk_buff
, data
)),
9315 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 64),
9318 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9319 .fixup_map1
= { 16 },
9321 .errstr
= "R0 min value is outside of the array range",
9324 "calls: overlapping caller/callee",
9326 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 0),
9327 BPF_MOV64_IMM(BPF_REG_0
, 1),
9330 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9331 .errstr
= "last insn is not an exit or jmp",
9335 "calls: wrong recursive calls",
9337 BPF_JMP_IMM(BPF_JA
, 0, 0, 4),
9338 BPF_JMP_IMM(BPF_JA
, 0, 0, 4),
9339 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -2),
9340 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -2),
9341 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -2),
9342 BPF_MOV64_IMM(BPF_REG_0
, 1),
9345 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9346 .errstr
= "jump out of range",
9350 "calls: wrong src reg",
9352 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 2, 0, 0),
9353 BPF_MOV64_IMM(BPF_REG_0
, 1),
9356 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9357 .errstr
= "BPF_CALL uses reserved fields",
9361 "calls: wrong off value",
9363 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, -1, 2),
9364 BPF_MOV64_IMM(BPF_REG_0
, 1),
9366 BPF_MOV64_IMM(BPF_REG_0
, 2),
9369 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9370 .errstr
= "BPF_CALL uses reserved fields",
9374 "calls: jump back loop",
9376 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -1),
9377 BPF_MOV64_IMM(BPF_REG_0
, 1),
9380 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9381 .errstr
= "back-edge from insn 0 to 0",
9385 "calls: conditional call",
9387 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9388 offsetof(struct __sk_buff
, mark
)),
9389 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
9390 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
9391 BPF_MOV64_IMM(BPF_REG_0
, 1),
9393 BPF_MOV64_IMM(BPF_REG_0
, 2),
9396 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9397 .errstr
= "jump out of range",
9401 "calls: conditional call 2",
9403 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9404 offsetof(struct __sk_buff
, mark
)),
9405 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
9406 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
9407 BPF_MOV64_IMM(BPF_REG_0
, 1),
9409 BPF_MOV64_IMM(BPF_REG_0
, 2),
9411 BPF_MOV64_IMM(BPF_REG_0
, 3),
9414 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9418 "calls: conditional call 3",
9420 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9421 offsetof(struct __sk_buff
, mark
)),
9422 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
9423 BPF_JMP_IMM(BPF_JA
, 0, 0, 4),
9424 BPF_MOV64_IMM(BPF_REG_0
, 1),
9426 BPF_MOV64_IMM(BPF_REG_0
, 1),
9427 BPF_JMP_IMM(BPF_JA
, 0, 0, -6),
9428 BPF_MOV64_IMM(BPF_REG_0
, 3),
9429 BPF_JMP_IMM(BPF_JA
, 0, 0, -6),
9431 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9432 .errstr
= "back-edge from insn",
9436 "calls: conditional call 4",
9438 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9439 offsetof(struct __sk_buff
, mark
)),
9440 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
9441 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
9442 BPF_MOV64_IMM(BPF_REG_0
, 1),
9444 BPF_MOV64_IMM(BPF_REG_0
, 1),
9445 BPF_JMP_IMM(BPF_JA
, 0, 0, -5),
9446 BPF_MOV64_IMM(BPF_REG_0
, 3),
9449 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9453 "calls: conditional call 5",
9455 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9456 offsetof(struct __sk_buff
, mark
)),
9457 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
9458 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
9459 BPF_MOV64_IMM(BPF_REG_0
, 1),
9461 BPF_MOV64_IMM(BPF_REG_0
, 1),
9462 BPF_JMP_IMM(BPF_JA
, 0, 0, -6),
9463 BPF_MOV64_IMM(BPF_REG_0
, 3),
9466 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9467 .errstr
= "back-edge from insn",
9471 "calls: conditional call 6",
9473 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
9474 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, -2),
9476 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9477 offsetof(struct __sk_buff
, mark
)),
9480 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9481 .errstr
= "back-edge from insn",
9485 "calls: using r0 returned by callee",
9487 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9489 BPF_MOV64_IMM(BPF_REG_0
, 2),
9492 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9496 "calls: using uninit r0 from callee",
9498 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9502 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9503 .errstr
= "!read_ok",
9507 "calls: callee is using r1",
9509 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9511 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9512 offsetof(struct __sk_buff
, len
)),
9515 .prog_type
= BPF_PROG_TYPE_SCHED_ACT
,
9517 .retval
= TEST_DATA_LEN
,
9520 "calls: callee using args1",
9522 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9524 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9527 .errstr_unpriv
= "allowed for root only",
9528 .result_unpriv
= REJECT
,
9530 .retval
= POINTER_VALUE
,
9533 "calls: callee using wrong args2",
9535 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9537 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
9540 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9541 .errstr
= "R2 !read_ok",
9545 "calls: callee using two args",
9547 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9548 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_6
,
9549 offsetof(struct __sk_buff
, len
)),
9550 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_6
,
9551 offsetof(struct __sk_buff
, len
)),
9552 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9554 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9555 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
9558 .errstr_unpriv
= "allowed for root only",
9559 .result_unpriv
= REJECT
,
9561 .retval
= TEST_DATA_LEN
+ TEST_DATA_LEN
- ETH_HLEN
- ETH_HLEN
,
9564 "calls: callee changing pkt pointers",
9566 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
9567 offsetof(struct xdp_md
, data
)),
9568 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
9569 offsetof(struct xdp_md
, data_end
)),
9570 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_6
),
9571 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_8
, 8),
9572 BPF_JMP_REG(BPF_JGT
, BPF_REG_8
, BPF_REG_7
, 2),
9573 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9574 /* clear_all_pkt_pointers() has to walk all frames
9575 * to make sure that pkt pointers in the caller
9576 * are cleared when callee is calling a helper that
9577 * adjusts packet size
9579 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
9580 BPF_MOV32_IMM(BPF_REG_0
, 0),
9582 BPF_MOV64_IMM(BPF_REG_2
, 0),
9583 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9584 BPF_FUNC_xdp_adjust_head
),
9588 .errstr
= "R6 invalid mem access 'inv'",
9589 .prog_type
= BPF_PROG_TYPE_XDP
,
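/* The test above relies on the rule that helpers which change the packet
 * size (bpf_xdp_adjust_head(), bpf_xdp_adjust_meta(), and similar)
 * invalidate every previously derived packet pointer in every frame,
 * the caller's included. In C the caller therefore has to re-read and
 * re-check the pointers after the callee returns; a hedged sketch with
 * illustrative names:
 *
 *	static __attribute__((noinline)) int shrink(struct xdp_md *ctx)
 *	{
 *		return bpf_xdp_adjust_head(ctx, 0);
 *	}
 *
 *	SEC("xdp")
 *	int caller_sketch(struct xdp_md *ctx)
 *	{
 *		void *data, *data_end;
 *
 *		shrink(ctx);
 *		data = (void *)(long)ctx->data;
 *		data_end = (void *)(long)ctx->data_end;
 *		if (data + 8 > data_end)
 *			return XDP_DROP;
 *		return ((__u8 *)data)[0] ? XDP_PASS : XDP_DROP;
 *	}
 *
 * Reusing a pre-call packet pointer instead triggers the
 * "R6 invalid mem access 'inv'" rejection checked above.
 */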
9592 "calls: two calls with args",
9594 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9596 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9597 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 6),
9598 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_0
),
9599 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9600 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9601 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_0
),
9602 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
9604 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9605 offsetof(struct __sk_buff
, len
)),
9608 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9610 .retval
= TEST_DATA_LEN
+ TEST_DATA_LEN
,
9613 "calls: calls with stack arith",
9615 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
9616 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -64),
9617 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9619 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -64),
9620 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9622 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -64),
9623 BPF_MOV64_IMM(BPF_REG_0
, 42),
9624 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
9627 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9632 "calls: calls with misaligned stack access",
9634 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
9635 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -63),
9636 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9638 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -61),
9639 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9641 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -63),
9642 BPF_MOV64_IMM(BPF_REG_0
, 42),
9643 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
9646 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9647 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
9648 .errstr
= "misaligned stack access",
9652 "calls: calls control flow, jump test",
9654 BPF_MOV64_IMM(BPF_REG_0
, 42),
9655 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
9656 BPF_MOV64_IMM(BPF_REG_0
, 43),
9657 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
9658 BPF_JMP_IMM(BPF_JA
, 0, 0, -3),
9661 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9666 "calls: calls control flow, jump test 2",
9668 BPF_MOV64_IMM(BPF_REG_0
, 42),
9669 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
9670 BPF_MOV64_IMM(BPF_REG_0
, 43),
9671 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
9672 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -3),
9675 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9676 .errstr
= "jump out of range from insn 1 to 4",
9680 "calls: two calls with bad jump",
9682 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9684 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9685 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 6),
9686 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_0
),
9687 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9688 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9689 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_0
),
9690 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
9692 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9693 offsetof(struct __sk_buff
, len
)),
9694 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, -3),
9697 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9698 .errstr
= "jump out of range from insn 11 to 9",
9702 "calls: recursive call. test1",
9704 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9706 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -1),
9709 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9710 .errstr
= "back-edge",
9714 "calls: recursive call. test2",
9716 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9718 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -3),
9721 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9722 .errstr
= "back-edge",
9726 "calls: unreachable code",
9728 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9730 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9732 BPF_MOV64_IMM(BPF_REG_0
, 0),
9734 BPF_MOV64_IMM(BPF_REG_0
, 0),
9737 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9738 .errstr
= "unreachable insn 6",
9742 "calls: invalid call",
9744 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9746 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, -4),
9749 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9750 .errstr
= "invalid destination",
9754 "calls: invalid call 2",
9756 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9758 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 0x7fffffff),
9761 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9762 .errstr
= "invalid destination",
9766 "calls: jumping across function bodies. test1",
9768 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
9769 BPF_MOV64_IMM(BPF_REG_0
, 0),
9771 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -3),
9774 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9775 .errstr
= "jump out of range",
9779 "calls: jumping across function bodies. test2",
9781 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
9782 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
9783 BPF_MOV64_IMM(BPF_REG_0
, 0),
9787 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9788 .errstr
= "jump out of range",
9792 "calls: call without exit",
9794 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9796 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9798 BPF_MOV64_IMM(BPF_REG_0
, 0),
9799 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -2),
9801 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9802 .errstr
= "not an exit",
9806 "calls: call into middle of ld_imm64",
9808 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9809 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9810 BPF_MOV64_IMM(BPF_REG_0
, 0),
9812 BPF_LD_IMM64(BPF_REG_0
, 0),
9815 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9816 .errstr
= "last insn",
9820 "calls: call into middle of other call",
9822 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9823 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9824 BPF_MOV64_IMM(BPF_REG_0
, 0),
9826 BPF_MOV64_IMM(BPF_REG_0
, 0),
9827 BPF_MOV64_IMM(BPF_REG_0
, 0),
9830 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9831 .errstr
= "last insn",
9835 "calls: ld_abs with changing ctx data in callee",
9837 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9838 BPF_LD_ABS(BPF_B
, 0),
9839 BPF_LD_ABS(BPF_H
, 0),
9840 BPF_LD_ABS(BPF_W
, 0),
9841 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_6
),
9842 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 5),
9843 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_7
),
9844 BPF_LD_ABS(BPF_B
, 0),
9845 BPF_LD_ABS(BPF_H
, 0),
9846 BPF_LD_ABS(BPF_W
, 0),
9848 BPF_MOV64_IMM(BPF_REG_2
, 1),
9849 BPF_MOV64_IMM(BPF_REG_3
, 2),
9850 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9851 BPF_FUNC_skb_vlan_push
),
9854 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9855 .errstr
= "BPF_LD_[ABS|IND] instructions cannot be mixed",
9859 "calls: two calls with bad fallthrough",
9861 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9863 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9864 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 6),
9865 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_0
),
9866 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9867 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9868 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_0
),
9869 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
9870 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_0
),
9871 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9872 offsetof(struct __sk_buff
, len
)),
9875 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
9876 .errstr
= "not an exit",
9880 "calls: two calls with stack read",
9882 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
9883 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
9884 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
9885 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
9887 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9888 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 6),
9889 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_0
),
9890 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9891 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
9892 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_0
),
9893 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
9895 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9898 .prog_type
= BPF_PROG_TYPE_XDP
,
9902 "calls: two calls with stack write",
9905 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
9906 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
9907 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
9908 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
9909 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
9910 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
9911 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -16),
9915 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
9916 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
9917 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 7),
9918 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
9919 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
9920 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
9921 BPF_ALU64_REG(BPF_ADD
, BPF_REG_8
, BPF_REG_0
),
9922 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_8
),
9923 /* write into stack frame of main prog */
9924 BPF_STX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
9928 /* read from stack frame of main prog */
9929 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9932 .prog_type
= BPF_PROG_TYPE_XDP
,
9936 "calls: stack overflow using two frames (pre-call access)",
9939 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -300, 0),
9940 BPF_RAW_INSN(BPF_JMP
|BPF_CALL
, 0, 1, 0, 1),
9944 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -300, 0),
9945 BPF_MOV64_IMM(BPF_REG_0
, 0),
9948 .prog_type
= BPF_PROG_TYPE_XDP
,
9949 .errstr
= "combined stack size",
9953 "calls: stack overflow using two frames (post-call access)",
9956 BPF_RAW_INSN(BPF_JMP
|BPF_CALL
, 0, 1, 0, 2),
9957 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -300, 0),
9961 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -300, 0),
9962 BPF_MOV64_IMM(BPF_REG_0
, 0),
9965 .prog_type
= BPF_PROG_TYPE_XDP
,
9966 .errstr
= "combined stack size",
{
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
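/* Editor's note: worked out, the deepest chains in test1 are
 * main+A = 32 + 256 = 288 bytes and main+B+A = 32 + 64 + 256 = 352 bytes,
 * both below the 512-byte combined stack limit, so the load is expected
 * to succeed.
 */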
{
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
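/* Editor's note: in test3 the deepest chain is main+B+A = 64 + 256 + 224 =
 * 544 bytes, which is over the 512-byte combined stack limit, hence the
 * "combined stack" rejection.
 */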
{
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *	func1(0);
	 *	func1(1);
	 *	func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	}
	 *	func2(alloc_or_recurse);
	 * }
	 * void func2(int alloc_or_recurse) {
	 *	if (alloc_or_recurse) {
	 *		frame_pointer[-300] = 1;
	 *	}
	 * }
	 */
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
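/* Editor's note: although func1/func2 above only touch fp[-300] on one
 * branch, the verifier appears to account for the worst-case stack size of
 * every frame in a chain, so main -> func1 -> func2 amounts to roughly
 * 300 + 300 bytes and is rejected with "combined stack".
 */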
{
	"calls: stack depth check using three frames. test5",
	.insns = {
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
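/* Editor's note: the chain above is main plus eight nested callees (A-H),
 * i.e. nine frames. Besides the byte limit, the verifier also caps how deep
 * the call chain may be (eight frames in this version of the code), so this
 * fails with the "call stack" depth error rather than a stack-size error.
 */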
{
	"calls: spill into caller stack frame",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
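/* Editor's note: the callee above receives a pointer into the caller's
 * stack in R1 and tries to spill that stack pointer through it
 * (fp_caller[-8] = R1). Spilling a pointer into another function's stack
 * frame is refused, which produces the "cannot spill" message.
 */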
{
	"calls: write into caller stack frame",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	"calls: write into callee stack frame",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
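/* Editor's note: here the callee places a pointer to its own stack (fp-8)
 * in R0 and returns it. That frame ceases to exist once the callee returns,
 * so the verifier rejects the program with "cannot return stack pointer".
 */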
{
	"calls: two calls with stack write and void return",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for root only",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
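/* Editor's note: the callee above writes R0 only on the r1 != 0 path; when
 * r1 == 0 it jumps straight to the exit with R0 never set, so its return
 * value is undefined on that path ("R0 !read_ok"). For an unprivileged
 * user the program is rejected earlier, since bpf-to-bpf calls are
 * privileged here ("allowed for root only").
 */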
10221 "calls: two calls that return map_value",
10224 /* pass fp-16, fp-8 into a function */
10225 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10226 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10227 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10228 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10229 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 8),
10231 /* fetch map_value_ptr from the stack of this function */
10232 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
10233 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
10234 /* write into map value */
10235 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
/* fetch second map_value_ptr from the stack */
10237 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -16),
10238 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
10239 /* write into map value */
10240 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10241 BPF_MOV64_IMM(BPF_REG_0
, 0),
10245 /* call 3rd function twice */
10246 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10247 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10248 /* first time with fp-8 */
10249 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
10250 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
10251 /* second time with fp-16 */
10252 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
10256 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10257 /* lookup from map */
10258 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
10259 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10260 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10261 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10262 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10263 BPF_FUNC_map_lookup_elem
),
10264 /* write map_value_ptr into stack frame of main prog */
10265 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10266 BPF_MOV64_IMM(BPF_REG_0
, 0),
10267 BPF_EXIT_INSN(), /* return 0 */
10269 .prog_type
= BPF_PROG_TYPE_XDP
,
10270 .fixup_map1
= { 23 },
10274 "calls: two calls that return map_value with bool condition",
10277 /* pass fp-16, fp-8 into a function */
10278 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10279 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10280 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10281 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10282 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
10283 BPF_MOV64_IMM(BPF_REG_0
, 0),
10287 /* call 3rd function twice */
10288 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10289 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10290 /* first time with fp-8 */
10291 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 9),
10292 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 1, 2),
10293 /* fetch map_value_ptr from the stack of this function */
10294 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
10295 /* write into map value */
10296 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10297 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
10298 /* second time with fp-16 */
10299 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
10300 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 1, 2),
/* fetch second map_value_ptr from the stack */
10302 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_7
, 0),
10303 /* write into map value */
10304 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10308 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10309 /* lookup from map */
10310 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
10311 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10312 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10313 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10314 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10315 BPF_FUNC_map_lookup_elem
),
10316 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10317 BPF_MOV64_IMM(BPF_REG_0
, 0),
10318 BPF_EXIT_INSN(), /* return 0 */
10319 /* write map_value_ptr into stack frame of main prog */
10320 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10321 BPF_MOV64_IMM(BPF_REG_0
, 1),
10322 BPF_EXIT_INSN(), /* return 1 */
10324 .prog_type
= BPF_PROG_TYPE_XDP
,
10325 .fixup_map1
= { 23 },
10329 "calls: two calls that return map_value with incorrect bool check",
10332 /* pass fp-16, fp-8 into a function */
10333 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10334 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10335 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10336 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10337 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
10338 BPF_MOV64_IMM(BPF_REG_0
, 0),
10342 /* call 3rd function twice */
10343 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10344 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10345 /* first time with fp-8 */
10346 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 9),
10347 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 1, 2),
10348 /* fetch map_value_ptr from the stack of this function */
10349 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
10350 /* write into map value */
10351 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10352 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
10353 /* second time with fp-16 */
10354 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
10355 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
/* fetch second map_value_ptr from the stack */
10357 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_7
, 0),
10358 /* write into map value */
10359 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10363 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10364 /* lookup from map */
10365 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
10366 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10367 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10368 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10369 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10370 BPF_FUNC_map_lookup_elem
),
10371 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10372 BPF_MOV64_IMM(BPF_REG_0
, 0),
10373 BPF_EXIT_INSN(), /* return 0 */
10374 /* write map_value_ptr into stack frame of main prog */
10375 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10376 BPF_MOV64_IMM(BPF_REG_0
, 1),
10377 BPF_EXIT_INSN(), /* return 1 */
10379 .prog_type
= BPF_PROG_TYPE_XDP
,
10380 .fixup_map1
= { 23 },
10382 .errstr
= "invalid read from stack off -16+0 size 8",
10385 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
10388 /* pass fp-16, fp-8 into a function */
10389 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10390 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10391 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10392 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10393 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
10394 BPF_MOV64_IMM(BPF_REG_0
, 0),
10398 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10399 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10400 /* 1st lookup from map */
10401 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
10402 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10403 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10404 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10405 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10406 BPF_FUNC_map_lookup_elem
),
10407 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10408 BPF_MOV64_IMM(BPF_REG_8
, 0),
10409 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
10410 /* write map_value_ptr into stack frame of main prog at fp-8 */
10411 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10412 BPF_MOV64_IMM(BPF_REG_8
, 1),
10414 /* 2nd lookup from map */
10415 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
), /* 20 */
10416 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10417 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10418 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, /* 24 */
10419 BPF_FUNC_map_lookup_elem
),
10420 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10421 BPF_MOV64_IMM(BPF_REG_9
, 0),
10422 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
10423 /* write map_value_ptr into stack frame of main prog at fp-16 */
10424 BPF_STX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
10425 BPF_MOV64_IMM(BPF_REG_9
, 1),
10427 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10428 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
), /* 30 */
10429 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_8
),
10430 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_7
),
10431 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_9
),
10432 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1), /* 34 */
10436 /* if arg2 == 1 do *arg1 = 0 */
10437 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 1, 2),
10438 /* fetch map_value_ptr from the stack of this function */
10439 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
10440 /* write into map value */
10441 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10443 /* if arg4 == 1 do *arg3 = 0 */
10444 BPF_JMP_IMM(BPF_JNE
, BPF_REG_4
, 1, 2),
10445 /* fetch map_value_ptr from the stack of this function */
10446 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
10447 /* write into map value */
10448 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 2, 0),
10451 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10452 .fixup_map1
= { 12, 22 },
10454 .errstr
= "invalid access to map value, value_size=8 off=2 size=8",
10457 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
10460 /* pass fp-16, fp-8 into a function */
10461 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10462 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10463 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10464 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10465 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
10466 BPF_MOV64_IMM(BPF_REG_0
, 0),
10470 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10471 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10472 /* 1st lookup from map */
10473 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
10474 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10475 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10476 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10477 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10478 BPF_FUNC_map_lookup_elem
),
10479 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10480 BPF_MOV64_IMM(BPF_REG_8
, 0),
10481 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
10482 /* write map_value_ptr into stack frame of main prog at fp-8 */
10483 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10484 BPF_MOV64_IMM(BPF_REG_8
, 1),
10486 /* 2nd lookup from map */
10487 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
), /* 20 */
10488 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10489 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10490 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, /* 24 */
10491 BPF_FUNC_map_lookup_elem
),
10492 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10493 BPF_MOV64_IMM(BPF_REG_9
, 0),
10494 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
10495 /* write map_value_ptr into stack frame of main prog at fp-16 */
10496 BPF_STX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
10497 BPF_MOV64_IMM(BPF_REG_9
, 1),
10499 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10500 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
), /* 30 */
10501 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_8
),
10502 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_7
),
10503 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_9
),
10504 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1), /* 34 */
10508 /* if arg2 == 1 do *arg1 = 0 */
10509 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 1, 2),
10510 /* fetch map_value_ptr from the stack of this function */
10511 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
10512 /* write into map value */
10513 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10515 /* if arg4 == 1 do *arg3 = 0 */
10516 BPF_JMP_IMM(BPF_JNE
, BPF_REG_4
, 1, 2),
10517 /* fetch map_value_ptr from the stack of this function */
10518 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
10519 /* write into map value */
10520 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10523 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10524 .fixup_map1
= { 12, 22 },
10528 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
10531 /* pass fp-16, fp-8 into a function */
10532 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10533 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10534 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10535 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10536 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 2),
10537 BPF_MOV64_IMM(BPF_REG_0
, 0),
10541 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10542 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10543 /* 1st lookup from map */
10544 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -24, 0),
10545 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10546 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -24),
10547 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10548 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10549 BPF_FUNC_map_lookup_elem
),
10550 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10551 BPF_MOV64_IMM(BPF_REG_8
, 0),
10552 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
10553 /* write map_value_ptr into stack frame of main prog at fp-8 */
10554 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10555 BPF_MOV64_IMM(BPF_REG_8
, 1),
10557 /* 2nd lookup from map */
10558 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10559 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -24),
10560 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10561 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10562 BPF_FUNC_map_lookup_elem
),
10563 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10564 BPF_MOV64_IMM(BPF_REG_9
, 0), // 26
10565 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
10566 /* write map_value_ptr into stack frame of main prog at fp-16 */
10567 BPF_STX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
10568 BPF_MOV64_IMM(BPF_REG_9
, 1),
10570 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10571 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
), // 30
10572 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_8
),
10573 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_7
),
10574 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_9
),
10575 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1), // 34
10576 BPF_JMP_IMM(BPF_JA
, 0, 0, -30),
10579 /* if arg2 == 1 do *arg1 = 0 */
10580 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 1, 2),
10581 /* fetch map_value_ptr from the stack of this function */
10582 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
10583 /* write into map value */
10584 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10586 /* if arg4 == 1 do *arg3 = 0 */
10587 BPF_JMP_IMM(BPF_JNE
, BPF_REG_4
, 1, 2),
10588 /* fetch map_value_ptr from the stack of this function */
10589 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
10590 /* write into map value */
10591 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 2, 0),
10592 BPF_JMP_IMM(BPF_JA
, 0, 0, -8),
10594 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10595 .fixup_map1
= { 12, 22 },
10597 .errstr
= "invalid access to map value, value_size=8 off=2 size=8",
10600 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10603 /* pass fp-16, fp-8 into a function */
10604 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10605 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10606 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10607 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10608 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
10609 BPF_MOV64_IMM(BPF_REG_0
, 0),
10613 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10614 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10615 /* 1st lookup from map */
10616 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
10617 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10618 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10619 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10620 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10621 BPF_FUNC_map_lookup_elem
),
10622 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10623 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10624 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10625 BPF_MOV64_IMM(BPF_REG_8
, 0),
10626 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
10627 BPF_MOV64_IMM(BPF_REG_8
, 1),
10629 /* 2nd lookup from map */
10630 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10631 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10632 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10633 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10634 BPF_FUNC_map_lookup_elem
),
10635 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10636 BPF_STX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
10637 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10638 BPF_MOV64_IMM(BPF_REG_9
, 0),
10639 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
10640 BPF_MOV64_IMM(BPF_REG_9
, 1),
10642 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10643 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
10644 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_8
),
10645 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_7
),
10646 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_9
),
10647 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
10651 /* if arg2 == 1 do *arg1 = 0 */
10652 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 1, 2),
10653 /* fetch map_value_ptr from the stack of this function */
10654 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
10655 /* write into map value */
10656 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10658 /* if arg4 == 1 do *arg3 = 0 */
10659 BPF_JMP_IMM(BPF_JNE
, BPF_REG_4
, 1, 2),
10660 /* fetch map_value_ptr from the stack of this function */
10661 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
10662 /* write into map value */
10663 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10666 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10667 .fixup_map1
= { 12, 22 },
10671 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
10674 /* pass fp-16, fp-8 into a function */
10675 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
10676 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
10677 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10678 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
10679 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 2),
10680 BPF_MOV64_IMM(BPF_REG_0
, 0),
10684 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
10685 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
10686 /* 1st lookup from map */
10687 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
10688 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10689 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10690 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10691 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10692 BPF_FUNC_map_lookup_elem
),
10693 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10694 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_0
, 0),
10695 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10696 BPF_MOV64_IMM(BPF_REG_8
, 0),
10697 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
10698 BPF_MOV64_IMM(BPF_REG_8
, 1),
10700 /* 2nd lookup from map */
10701 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
10702 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
10703 BPF_LD_MAP_FD(BPF_REG_1
, 0),
10704 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
10705 BPF_FUNC_map_lookup_elem
),
10706 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10707 BPF_STX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
10708 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
10709 BPF_MOV64_IMM(BPF_REG_9
, 0),
10710 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
10711 BPF_MOV64_IMM(BPF_REG_9
, 1),
10713 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10714 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
10715 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_8
),
10716 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_7
),
10717 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_9
),
10718 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
10722 /* if arg2 == 1 do *arg1 = 0 */
10723 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 1, 2),
10724 /* fetch map_value_ptr from the stack of this function */
10725 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
10726 /* write into map value */
10727 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10729 /* if arg4 == 0 do *arg3 = 0 */
10730 BPF_JMP_IMM(BPF_JNE
, BPF_REG_4
, 0, 2),
10731 /* fetch map_value_ptr from the stack of this function */
10732 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
10733 /* write into map value */
10734 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
10737 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10738 .fixup_map1
= { 12, 22 },
10740 .errstr
= "R0 invalid mem access 'inv'",
10743 "calls: pkt_ptr spill into caller stack",
10745 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10746 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10747 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 1),
10751 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10752 offsetof(struct __sk_buff
, data
)),
10753 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10754 offsetof(struct __sk_buff
, data_end
)),
10755 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10756 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10757 /* spill unchecked pkt_ptr into stack of caller */
10758 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10759 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
10760 /* now the pkt range is verified, read pkt_ptr from stack */
10761 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
10762 /* write 4 bytes into packet */
10763 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
10767 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10768 .retval
= POINTER_VALUE
,
10771 "calls: pkt_ptr spill into caller stack 2",
10773 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10774 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10775 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
10776 /* Marking is still kept, but not in all cases safe. */
10777 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
10778 BPF_ST_MEM(BPF_W
, BPF_REG_4
, 0, 0),
10782 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10783 offsetof(struct __sk_buff
, data
)),
10784 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10785 offsetof(struct __sk_buff
, data_end
)),
10786 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10787 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10788 /* spill unchecked pkt_ptr into stack of caller */
10789 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10790 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
10791 /* now the pkt range is verified, read pkt_ptr from stack */
10792 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
10793 /* write 4 bytes into packet */
10794 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
10797 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10798 .errstr
= "invalid access to packet",
10802 "calls: pkt_ptr spill into caller stack 3",
10804 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10805 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10806 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
10807 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
10808 /* Marking is still kept and safe here. */
10809 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
10810 BPF_ST_MEM(BPF_W
, BPF_REG_4
, 0, 0),
10814 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10815 offsetof(struct __sk_buff
, data
)),
10816 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10817 offsetof(struct __sk_buff
, data_end
)),
10818 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10819 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10820 /* spill unchecked pkt_ptr into stack of caller */
10821 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10822 BPF_MOV64_IMM(BPF_REG_5
, 0),
10823 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
10824 BPF_MOV64_IMM(BPF_REG_5
, 1),
10825 /* now the pkt range is verified, read pkt_ptr from stack */
10826 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
10827 /* write 4 bytes into packet */
10828 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
10829 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
10832 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10837 "calls: pkt_ptr spill into caller stack 4",
10839 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10840 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10841 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 4),
10842 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
10843 /* Check marking propagated. */
10844 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
10845 BPF_ST_MEM(BPF_W
, BPF_REG_4
, 0, 0),
10849 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10850 offsetof(struct __sk_buff
, data
)),
10851 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10852 offsetof(struct __sk_buff
, data_end
)),
10853 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10854 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10855 /* spill unchecked pkt_ptr into stack of caller */
10856 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10857 BPF_MOV64_IMM(BPF_REG_5
, 0),
10858 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
10859 BPF_MOV64_IMM(BPF_REG_5
, 1),
10860 /* don't read back pkt_ptr from stack here */
10861 /* write 4 bytes into packet */
10862 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
10863 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
10866 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10871 "calls: pkt_ptr spill into caller stack 5",
10873 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10874 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10875 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_1
, 0),
10876 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
10877 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
10878 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_4
, 0),
10882 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10883 offsetof(struct __sk_buff
, data
)),
10884 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10885 offsetof(struct __sk_buff
, data_end
)),
10886 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10887 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10888 BPF_MOV64_IMM(BPF_REG_5
, 0),
10889 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
10890 /* spill checked pkt_ptr into stack of caller */
10891 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10892 BPF_MOV64_IMM(BPF_REG_5
, 1),
10893 /* don't read back pkt_ptr from stack here */
10894 /* write 4 bytes into packet */
10895 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
10896 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
10899 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10900 .errstr
= "same insn cannot be used with different",
10904 "calls: pkt_ptr spill into caller stack 6",
10906 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10907 offsetof(struct __sk_buff
, data_end
)),
10908 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10909 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10910 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10911 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
10912 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
10913 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_4
, 0),
10917 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10918 offsetof(struct __sk_buff
, data
)),
10919 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10920 offsetof(struct __sk_buff
, data_end
)),
10921 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10922 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10923 BPF_MOV64_IMM(BPF_REG_5
, 0),
10924 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
10925 /* spill checked pkt_ptr into stack of caller */
10926 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10927 BPF_MOV64_IMM(BPF_REG_5
, 1),
10928 /* don't read back pkt_ptr from stack here */
10929 /* write 4 bytes into packet */
10930 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
10931 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
10934 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10935 .errstr
= "R4 invalid mem access",
10939 "calls: pkt_ptr spill into caller stack 7",
10941 BPF_MOV64_IMM(BPF_REG_2
, 0),
10942 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10943 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10944 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10945 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
10946 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
10947 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_4
, 0),
10951 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10952 offsetof(struct __sk_buff
, data
)),
10953 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10954 offsetof(struct __sk_buff
, data_end
)),
10955 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10956 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10957 BPF_MOV64_IMM(BPF_REG_5
, 0),
10958 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
10959 /* spill checked pkt_ptr into stack of caller */
10960 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10961 BPF_MOV64_IMM(BPF_REG_5
, 1),
10962 /* don't read back pkt_ptr from stack here */
10963 /* write 4 bytes into packet */
10964 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
10965 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
10968 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
10969 .errstr
= "R4 invalid mem access",
10973 "calls: pkt_ptr spill into caller stack 8",
10975 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10976 offsetof(struct __sk_buff
, data
)),
10977 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10978 offsetof(struct __sk_buff
, data_end
)),
10979 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10980 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10981 BPF_JMP_REG(BPF_JLE
, BPF_REG_0
, BPF_REG_3
, 1),
10983 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
10984 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
10985 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
10986 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
10987 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
10988 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_4
, 0),
10992 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
10993 offsetof(struct __sk_buff
, data
)),
10994 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
10995 offsetof(struct __sk_buff
, data_end
)),
10996 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
10997 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
10998 BPF_MOV64_IMM(BPF_REG_5
, 0),
10999 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
11000 /* spill checked pkt_ptr into stack of caller */
11001 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
11002 BPF_MOV64_IMM(BPF_REG_5
, 1),
11003 /* don't read back pkt_ptr from stack here */
11004 /* write 4 bytes into packet */
11005 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
11006 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
11009 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
11013 "calls: pkt_ptr spill into caller stack 9",
11015 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
11016 offsetof(struct __sk_buff
, data
)),
11017 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
11018 offsetof(struct __sk_buff
, data_end
)),
11019 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
11020 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
11021 BPF_JMP_REG(BPF_JLE
, BPF_REG_0
, BPF_REG_3
, 1),
11023 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
11024 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
11025 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
11026 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 1, 0, 3),
11027 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
11028 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_4
, 0),
11032 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
11033 offsetof(struct __sk_buff
, data
)),
11034 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
11035 offsetof(struct __sk_buff
, data_end
)),
11036 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
11037 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
11038 BPF_MOV64_IMM(BPF_REG_5
, 0),
11039 /* spill unchecked pkt_ptr into stack of caller */
11040 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
11041 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
11042 BPF_MOV64_IMM(BPF_REG_5
, 1),
11043 /* don't read back pkt_ptr from stack here */
11044 /* write 4 bytes into packet */
11045 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
11046 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
11049 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
11050 .errstr
= "invalid access to packet",
{
	"calls: caller stack init to zero or map_value_or_null",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* fetch map_value_or_null or const_zero from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* store into map_value */
	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* if (ctx == 0) return; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
	/* else bpf_map_lookup() and *(fp - 8) = r0 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map1 = { 13 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
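/* Editor's note: the caller above zero-initializes fp-8 before the call;
 * the callee either leaves that zero in place (ctx == 0) or overwrites the
 * slot with the map_lookup result. Either way the caller's NULL check
 * covers both outcomes, so the program is expected to be accepted.
 */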
{
	"calls: stack init to zero and pruning",
	.insns = {
	/* first make allocated_stack 16 byte */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	/* now fork the execution such that the false branch
	 * of JGT insn will be verified second and it skips zero
	 * init of fp-8 stack slot. If stack liveness marking
	 * is missing live_read marks from call map_lookup
	 * processing then pruning will incorrectly assume
	 * that fp-8 stack slot was unused in the fall-through
	 * branch and will accept the program incorrectly.
	 */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 6 },
	.errstr = "invalid indirect read from stack off -8+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
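/* Editor's note: the expected error above comes from the path where the JGT
 * branch is taken and fp-8 is never written: map_lookup_elem then reads
 * 8 bytes of uninitialized stack as its key. The test guards against state
 * pruning wrongly skipping that path, which could only happen if the
 * liveness (live_read) marks from the helper's stack access were lost.
 */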
{
	"search pruning: all branches should be verified (nop operation)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_4, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
	BPF_MOV64_IMM(BPF_REG_6, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
	BPF_EXIT_INSN(),
	},
	.fixup_map1 = { 3 },
	.errstr = "R6 invalid mem access 'inv'",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"search pruning: all branches should be verified (invalid stack access)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
	BPF_EXIT_INSN(),
	},
	.fixup_map1 = { 3 },
	.errstr = "invalid read from stack off -16+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
};
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static int create_map(uint32_t size_value, uint32_t max_elem)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
			    size_value, max_elem, BPF_F_NO_PREALLOC);
	if (fd < 0)
		printf("Failed to create hash map '%s'!\n", strerror(errno));

	return fd;
}

static int create_prog_array(void)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			    sizeof(int), 4, 0);
	if (fd < 0)
		printf("Failed to create prog array '%s'!\n", strerror(errno));

	return fd;
}

static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0)
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));

	close(inner_map_fd);

	return outer_map_fd;
}

static char bpf_vlog[32768];
static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
			  int *map_fds)
{
	int *fixup_map1 = test->fixup_map1;
	int *fixup_map2 = test->fixup_map2;
	int *fixup_prog = test->fixup_prog;
	int *fixup_map_in_map = test->fixup_map_in_map;

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * for verifier and not do a runtime lookup, so the only thing
	 * that really matters is value size in this case.
	 */
	if (*fixup_map1) {
		map_fds[0] = create_map(sizeof(long long), 1);
		do {
			prog[*fixup_map1].imm = map_fds[0];
			fixup_map1++;
		} while (*fixup_map1);
	}

	if (*fixup_map2) {
		map_fds[1] = create_map(sizeof(struct test_val), 1);
		do {
			prog[*fixup_map2].imm = map_fds[1];
			fixup_map2++;
		} while (*fixup_map2);
	}

	if (*fixup_prog) {
		map_fds[2] = create_prog_array();
		do {
			prog[*fixup_prog].imm = map_fds[2];
			fixup_prog++;
		} while (*fixup_prog);
	}

	if (*fixup_map_in_map) {
		map_fds[3] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[3];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}
}
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, reject_from_alignment;
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	char data_in[TEST_DATA_LEN] = {};
	int prog_type = test->prog_type;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	uint32_t retval;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	do_test_fixup(test, prog, map_fds);

	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	reject_from_alignment = fd_prog < 0 &&
				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
				strstr(bpf_vlog, "Unknown alignment.");
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (reject_from_alignment) {
		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
		       strerror(errno));
		goto fail_log;
	}
#endif
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0 && !reject_from_alignment) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
			printf("FAIL\nUnexpected error message!\n");
			goto fail_log;
		}
	}

	if (fd_prog >= 0) {
		err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
					NULL, NULL, &retval, NULL);
		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
			printf("Unexpected bpf_prog_test_run error\n");
			goto fail_log;
		}
		if (!err && retval != test->retval &&
		    test->retval != POINTER_VALUE) {
			printf("FAIL retval %d != %d\n", retval, test->retval);
			goto fail_log;
		}
	}
	(*passes)++;
	printf("OK%s\n", reject_from_alignment ?
	       " (NOTE: reject due to unknown alignment)" : "");
close_fds:
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);

	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}
static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root we
		 * silently ignore.
		 */
		if (!test->prog_type) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (!unpriv) {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
	struct rlimit rlim = { 1 << 20, 1 << 20 };
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
	return do_test(unpriv, from, to);
}