/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <sys/resource.h>

/* Local helper header, assumed to provide bpf_create_map(), bpf_prog_load()
 * and bpf_log_buf used by the harness below.
 */
#include "libbpf.h"
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
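/* Size limits for the test entries below. The original values are not
 * preserved in this fragment; the ones here are assumed placeholders.
 */
#define MAX_INSNS	512
#define MAX_FIXUPS	8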
struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup[MAX_FIXUPS];
	int prog_array_fixup[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
};
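/*
 * Each entry below is one verifier test: a short eBPF program plus the
 * expected verdict and, for rejected programs, the substring expected in
 * the verifier log, optionally with separate expectations for
 * unprivileged loads.
 */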
static struct bpf_test tests[] = {
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_2, 3),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
	BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	.errstr = "unreachable",

	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	.errstr = "unreachable",

	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	.errstr = "jump out of range",

	BPF_JMP_IMM(BPF_JA, 0, 0, -2),
	.errstr = "jump out of range",

	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_LD_IMM64(BPF_REG_0, 1),
	BPF_LD_IMM64(BPF_REG_0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	.errstr = "invalid BPF_LD_IMM insn",
	.errstr_unpriv = "R1 pointer comparison",

	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_LD_IMM64(BPF_REG_0, 1),
	BPF_LD_IMM64(BPF_REG_0, 1),
	.errstr = "invalid BPF_LD_IMM insn",
	.errstr_unpriv = "R1 pointer comparison",

	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_LD_IMM64(BPF_REG_0, 1),
	BPF_LD_IMM64(BPF_REG_0, 1),
	.errstr = "invalid bpf_ld_imm64 insn",

	BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
	.errstr = "invalid bpf_ld_imm64 insn",

	BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
	.errstr = "invalid bpf_ld_imm64 insn",

	BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
	.errstr = "jump out of range",

	BPF_JMP_IMM(BPF_JA, 0, 0, -1),
	.errstr = "back-edge",

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -4),
	.errstr = "back-edge",

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
	.errstr = "back-edge",
196 "read uninitialized register",
198 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
201 .errstr
= "R2 !read_ok",
205 "read invalid register",
207 BPF_MOV64_REG(BPF_REG_0
, -1),
210 .errstr
= "R15 is invalid",
214 "program doesn't init R0 before exit",
216 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
219 .errstr
= "R0 !read_ok",
223 "program doesn't init R0 before exit in all branches",
225 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
226 BPF_MOV64_IMM(BPF_REG_0
, 1),
227 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
230 .errstr
= "R0 !read_ok",
231 .errstr_unpriv
= "R1 pointer comparison",
235 "stack out of bounds",
237 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
240 .errstr
= "invalid stack",
244 "invalid call insn1",
246 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
249 .errstr
= "BPF_CALL uses reserved",
253 "invalid call insn2",
255 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
258 .errstr
= "BPF_CALL uses reserved",
262 "invalid function call",
264 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
267 .errstr
= "invalid func 1234567",
271 "uninitialized stack1",
273 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
274 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
275 BPF_LD_MAP_FD(BPF_REG_1
, 0),
276 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
280 .errstr
= "invalid indirect read from stack",
284 "uninitialized stack2",
286 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
287 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
290 .errstr
= "invalid read from stack",
294 "check valid spill/fill",
296 /* spill R1(ctx) into stack */
297 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
299 /* fill it back into R2 */
300 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
302 /* should be able to access R0 = *(R2 + 8) */
303 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
304 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
307 .errstr_unpriv
= "R0 leaks addr",
309 .result_unpriv
= REJECT
,
312 "check valid spill/fill, skb mark",
314 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
315 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
316 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
317 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
318 offsetof(struct __sk_buff
, mark
)),
322 .result_unpriv
= ACCEPT
,
325 "check corrupted spill/fill",
327 /* spill R1(ctx) into stack */
328 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
330 /* mess up with R1 pointer on stack */
331 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
333 /* fill back into R0 should fail */
334 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
338 .errstr_unpriv
= "attempt to corrupt spilled",
339 .errstr
= "corrupted spill",
343 "invalid src register in STX",
345 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
348 .errstr
= "R15 is invalid",
352 "invalid dst register in STX",
354 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
357 .errstr
= "R14 is invalid",
361 "invalid dst register in ST",
363 BPF_ST_MEM(BPF_B
, 14, -1, -1),
366 .errstr
= "R14 is invalid",
370 "invalid src register in LDX",
372 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
375 .errstr
= "R12 is invalid",
379 "invalid dst register in LDX",
381 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
384 .errstr
= "R11 is invalid",
390 BPF_RAW_INSN(0, 0, 0, 0, 0),
393 .errstr
= "invalid BPF_LD_IMM",
399 BPF_RAW_INSN(1, 0, 0, 0, 0),
402 .errstr
= "BPF_LDX uses reserved fields",
408 BPF_RAW_INSN(-1, 0, 0, 0, 0),
411 .errstr
= "invalid BPF_ALU opcode f0",
417 BPF_RAW_INSN(-1, -1, -1, -1, -1),
420 .errstr
= "invalid BPF_ALU opcode f0",
426 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
429 .errstr
= "BPF_ALU uses reserved fields",
433 "misaligned read from stack",
435 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
436 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
439 .errstr
= "misaligned access",
443 "invalid map_fd for function call",
445 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
446 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
447 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
448 BPF_LD_MAP_FD(BPF_REG_1
, 0),
449 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_delete_elem
),
452 .errstr
= "fd 0 is not pointing to valid bpf_map",
456 "don't check return value before access",
458 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
459 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
460 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
461 BPF_LD_MAP_FD(BPF_REG_1
, 0),
462 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
463 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
467 .errstr
= "R0 invalid mem access 'map_value_or_null'",
471 "access memory with incorrect alignment",
473 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
474 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
475 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
476 BPF_LD_MAP_FD(BPF_REG_1
, 0),
477 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
478 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
479 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
483 .errstr
= "misaligned access",
487 "sometimes access memory with incorrect alignment",
489 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
490 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
491 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
492 BPF_LD_MAP_FD(BPF_REG_1
, 0),
493 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
494 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
495 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
497 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
501 .errstr
= "R0 invalid mem access",
502 .errstr_unpriv
= "R0 leaks addr",
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,

	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 14),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 11),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 5),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,

	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 19),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JA, 0, 0, 15),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
	BPF_JMP_IMM(BPF_JA, 0, 0, 11),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
	BPF_JMP_IMM(BPF_JA, 0, 0, 7),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,

	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,

	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr_unpriv = "R1 pointer comparison",
	.result_unpriv = REJECT,
685 "access skb fields ok",
687 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
688 offsetof(struct __sk_buff
, len
)),
689 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
690 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
691 offsetof(struct __sk_buff
, mark
)),
692 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
693 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
694 offsetof(struct __sk_buff
, pkt_type
)),
695 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
696 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
697 offsetof(struct __sk_buff
, queue_mapping
)),
698 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
699 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
700 offsetof(struct __sk_buff
, protocol
)),
701 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
702 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
703 offsetof(struct __sk_buff
, vlan_present
)),
704 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
705 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
706 offsetof(struct __sk_buff
, vlan_tci
)),
707 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
713 "access skb fields bad1",
715 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -4),
718 .errstr
= "invalid bpf_context access",
722 "access skb fields bad2",
724 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 9),
725 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
726 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
727 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
728 BPF_LD_MAP_FD(BPF_REG_1
, 0),
729 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
730 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
732 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
733 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
734 offsetof(struct __sk_buff
, pkt_type
)),
738 .errstr
= "different pointers",
739 .errstr_unpriv
= "R1 pointer comparison",
743 "access skb fields bad3",
745 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
746 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
747 offsetof(struct __sk_buff
, pkt_type
)),
749 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
750 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
751 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
752 BPF_LD_MAP_FD(BPF_REG_1
, 0),
753 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
754 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
756 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
757 BPF_JMP_IMM(BPF_JA
, 0, 0, -12),
760 .errstr
= "different pointers",
761 .errstr_unpriv
= "R1 pointer comparison",
765 "access skb fields bad4",
767 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 3),
768 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
769 offsetof(struct __sk_buff
, len
)),
770 BPF_MOV64_IMM(BPF_REG_0
, 0),
772 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
773 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
774 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
775 BPF_LD_MAP_FD(BPF_REG_1
, 0),
776 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
777 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
779 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
780 BPF_JMP_IMM(BPF_JA
, 0, 0, -13),
783 .errstr
= "different pointers",
784 .errstr_unpriv
= "R1 pointer comparison",
788 "check skb->mark is not writeable by sockets",
790 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
791 offsetof(struct __sk_buff
, mark
)),
794 .errstr
= "invalid bpf_context access",
795 .errstr_unpriv
= "R1 leaks addr",
799 "check skb->tc_index is not writeable by sockets",
801 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
802 offsetof(struct __sk_buff
, tc_index
)),
805 .errstr
= "invalid bpf_context access",
806 .errstr_unpriv
= "R1 leaks addr",
810 "check non-u32 access to cb",
812 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_1
,
813 offsetof(struct __sk_buff
, cb
[0])),
816 .errstr
= "invalid bpf_context access",
817 .errstr_unpriv
= "R1 leaks addr",
821 "check out of range skb->cb access",
823 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
824 offsetof(struct __sk_buff
, cb
[0]) + 256),
827 .errstr
= "invalid bpf_context access",
830 .prog_type
= BPF_PROG_TYPE_SCHED_ACT
,
833 "write skb fields from socket prog",
835 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
836 offsetof(struct __sk_buff
, cb
[4])),
837 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
838 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
839 offsetof(struct __sk_buff
, mark
)),
840 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
841 offsetof(struct __sk_buff
, tc_index
)),
842 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
843 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
844 offsetof(struct __sk_buff
, cb
[0])),
845 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
846 offsetof(struct __sk_buff
, cb
[2])),
850 .errstr_unpriv
= "R1 leaks addr",
851 .result_unpriv
= REJECT
,
854 "write skb fields from tc_cls_act prog",
856 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
857 offsetof(struct __sk_buff
, cb
[0])),
858 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
859 offsetof(struct __sk_buff
, mark
)),
860 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
861 offsetof(struct __sk_buff
, tc_index
)),
862 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
863 offsetof(struct __sk_buff
, tc_index
)),
864 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
865 offsetof(struct __sk_buff
, cb
[3])),
869 .result_unpriv
= REJECT
,
871 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
874 "PTR_TO_STACK store/load",
876 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
877 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
878 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
879 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
885 "PTR_TO_STACK store/load - bad alignment on off",
887 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
888 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
889 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
890 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
894 .errstr
= "misaligned access off -6 size 8",
897 "PTR_TO_STACK store/load - bad alignment on reg",
899 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
900 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
901 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
902 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
906 .errstr
= "misaligned access off -2 size 8",
909 "PTR_TO_STACK store/load - out of bounds low",
911 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
912 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -80000),
913 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
914 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
918 .errstr
= "invalid stack off=-79992 size=8",
921 "PTR_TO_STACK store/load - out of bounds high",
923 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
924 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
925 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
926 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
930 .errstr
= "invalid stack off=0 size=8",
933 "unpriv: return pointer",
935 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
939 .result_unpriv
= REJECT
,
940 .errstr_unpriv
= "R0 leaks addr",
943 "unpriv: add const to pointer",
945 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
946 BPF_MOV64_IMM(BPF_REG_0
, 0),
950 .result_unpriv
= REJECT
,
951 .errstr_unpriv
= "R1 pointer arithmetic",
954 "unpriv: add pointer to pointer",
956 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
957 BPF_MOV64_IMM(BPF_REG_0
, 0),
961 .result_unpriv
= REJECT
,
962 .errstr_unpriv
= "R1 pointer arithmetic",
965 "unpriv: neg pointer",
967 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_1
, 0),
968 BPF_MOV64_IMM(BPF_REG_0
, 0),
972 .result_unpriv
= REJECT
,
973 .errstr_unpriv
= "R1 pointer arithmetic",
976 "unpriv: cmp pointer with const",
978 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
979 BPF_MOV64_IMM(BPF_REG_0
, 0),
983 .result_unpriv
= REJECT
,
984 .errstr_unpriv
= "R1 pointer comparison",
987 "unpriv: cmp pointer with pointer",
989 BPF_JMP_REG(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
990 BPF_MOV64_IMM(BPF_REG_0
, 0),
994 .result_unpriv
= REJECT
,
995 .errstr_unpriv
= "R10 pointer comparison",
998 "unpriv: check that printk is disallowed",
1000 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1001 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1002 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1003 BPF_MOV64_IMM(BPF_REG_2
, 8),
1004 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
1005 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_trace_printk
),
1006 BPF_MOV64_IMM(BPF_REG_0
, 0),
1009 .errstr_unpriv
= "unknown func 6",
1010 .result_unpriv
= REJECT
,
1014 "unpriv: pass pointer to helper function",
1016 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1017 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1018 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1019 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1020 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
1021 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
1022 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_update_elem
),
1023 BPF_MOV64_IMM(BPF_REG_0
, 0),
1027 .errstr_unpriv
= "R4 leaks addr",
1028 .result_unpriv
= REJECT
,
1032 "unpriv: indirectly pass pointer on stack to helper function",
1034 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1035 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1036 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1037 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1038 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
1039 BPF_MOV64_IMM(BPF_REG_0
, 0),
1043 .errstr
= "invalid indirect read from stack off -8+0 size 8",
1047 "unpriv: mangle pointer on stack 1",
1049 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1050 BPF_ST_MEM(BPF_W
, BPF_REG_10
, -8, 0),
1051 BPF_MOV64_IMM(BPF_REG_0
, 0),
1054 .errstr_unpriv
= "attempt to corrupt spilled",
1055 .result_unpriv
= REJECT
,
1059 "unpriv: mangle pointer on stack 2",
1061 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1062 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -1, 0),
1063 BPF_MOV64_IMM(BPF_REG_0
, 0),
1066 .errstr_unpriv
= "attempt to corrupt spilled",
1067 .result_unpriv
= REJECT
,
1071 "unpriv: read pointer from stack in small chunks",
1073 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1074 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_10
, -8),
1075 BPF_MOV64_IMM(BPF_REG_0
, 0),
1078 .errstr
= "invalid size",
1082 "unpriv: write pointer into ctx",
1084 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
, 0),
1085 BPF_MOV64_IMM(BPF_REG_0
, 0),
1088 .errstr_unpriv
= "R1 leaks addr",
1089 .result_unpriv
= REJECT
,
1090 .errstr
= "invalid bpf_context access",
1094 "unpriv: write pointer into map elem value",
1096 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1097 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1098 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1099 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1100 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
1101 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
1102 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
1106 .errstr_unpriv
= "R0 leaks addr",
1107 .result_unpriv
= REJECT
,
1111 "unpriv: partial copy of pointer",
1113 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_10
),
1114 BPF_MOV64_IMM(BPF_REG_0
, 0),
1117 .errstr_unpriv
= "R10 partial copy",
1118 .result_unpriv
= REJECT
,
1122 "unpriv: pass pointer to tail_call",
1124 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
1125 BPF_LD_MAP_FD(BPF_REG_2
, 0),
1126 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
1127 BPF_MOV64_IMM(BPF_REG_0
, 0),
1130 .prog_array_fixup
= {1},
1131 .errstr_unpriv
= "R3 leaks addr into helper",
1132 .result_unpriv
= REJECT
,
1136 "unpriv: cmp map pointer with zero",
1138 BPF_MOV64_IMM(BPF_REG_1
, 0),
1139 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1140 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
1141 BPF_MOV64_IMM(BPF_REG_0
, 0),
1145 .errstr_unpriv
= "R1 pointer comparison",
1146 .result_unpriv
= REJECT
,
1150 "unpriv: write into frame pointer",
1152 BPF_MOV64_REG(BPF_REG_10
, BPF_REG_1
),
1153 BPF_MOV64_IMM(BPF_REG_0
, 0),
1156 .errstr
= "frame pointer is read only",
1160 "unpriv: cmp of frame pointer",
1162 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_10
, 0, 0),
1163 BPF_MOV64_IMM(BPF_REG_0
, 0),
1166 .errstr_unpriv
= "R10 pointer comparison",
1167 .result_unpriv
= REJECT
,
1171 "unpriv: cmp of stack pointer",
1173 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1174 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1175 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_2
, 0, 0),
1176 BPF_MOV64_IMM(BPF_REG_0
, 0),
1179 .errstr_unpriv
= "R2 pointer comparison",
1180 .result_unpriv
= REJECT
,
1184 "unpriv: obfuscate stack pointer",
1186 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1187 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1188 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1189 BPF_MOV64_IMM(BPF_REG_0
, 0),
1192 .errstr_unpriv
= "R2 pointer arithmetic",
1193 .result_unpriv
= REJECT
,
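	/* The "raw_stack:" tests feed stack memory to bpf_skb_load_bytes()
	 * and check how the verifier handles uninitialized, spilled and
	 * out-of-bounds stack buffers passed to a helper.
	 */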
1197 "raw_stack: no skb_load_bytes",
1199 BPF_MOV64_IMM(BPF_REG_2
, 4),
1200 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1201 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1202 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1203 BPF_MOV64_IMM(BPF_REG_4
, 8),
1204 /* Call to skb_load_bytes() omitted. */
1205 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1209 .errstr
= "invalid read from stack off -8+0 size 8",
1210 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1213 "raw_stack: skb_load_bytes, no init",
1215 BPF_MOV64_IMM(BPF_REG_2
, 4),
1216 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1217 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1218 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1219 BPF_MOV64_IMM(BPF_REG_4
, 8),
1220 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1221 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1225 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1228 "raw_stack: skb_load_bytes, init",
1230 BPF_MOV64_IMM(BPF_REG_2
, 4),
1231 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1232 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1233 BPF_ST_MEM(BPF_DW
, BPF_REG_6
, 0, 0xcafe),
1234 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1235 BPF_MOV64_IMM(BPF_REG_4
, 8),
1236 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1237 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1241 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1244 "raw_stack: skb_load_bytes, spilled regs around bounds",
1246 BPF_MOV64_IMM(BPF_REG_2
, 4),
1247 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1248 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
1249 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8), /* spill ctx from R1 */
1250 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8), /* spill ctx from R1 */
1251 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1252 BPF_MOV64_IMM(BPF_REG_4
, 8),
1253 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1254 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8), /* fill ctx into R0 */
1255 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8), /* fill ctx into R2 */
1256 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
1257 offsetof(struct __sk_buff
, mark
)),
1258 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
1259 offsetof(struct __sk_buff
, priority
)),
1260 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
1264 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1267 "raw_stack: skb_load_bytes, spilled regs corruption",
1269 BPF_MOV64_IMM(BPF_REG_2
, 4),
1270 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1271 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1272 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0), /* spill ctx from R1 */
1273 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1274 BPF_MOV64_IMM(BPF_REG_4
, 8),
1275 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1276 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0), /* fill ctx into R0 */
1277 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
1278 offsetof(struct __sk_buff
, mark
)),
1282 .errstr
= "R0 invalid mem access 'inv'",
1283 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1286 "raw_stack: skb_load_bytes, spilled regs corruption 2",
1288 BPF_MOV64_IMM(BPF_REG_2
, 4),
1289 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1290 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
1291 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8), /* spill ctx from R1 */
1292 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0), /* spill ctx from R1 */
1293 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8), /* spill ctx from R1 */
1294 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1295 BPF_MOV64_IMM(BPF_REG_4
, 8),
1296 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1297 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8), /* fill ctx into R0 */
1298 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8), /* fill ctx into R2 */
1299 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0), /* fill ctx into R3 */
1300 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
1301 offsetof(struct __sk_buff
, mark
)),
1302 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
1303 offsetof(struct __sk_buff
, priority
)),
1304 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
1305 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_3
,
1306 offsetof(struct __sk_buff
, pkt_type
)),
1307 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
1311 .errstr
= "R3 invalid mem access 'inv'",
1312 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1315 "raw_stack: skb_load_bytes, spilled regs + data",
1317 BPF_MOV64_IMM(BPF_REG_2
, 4),
1318 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1319 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
1320 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8), /* spill ctx from R1 */
1321 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0), /* spill ctx from R1 */
1322 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8), /* spill ctx from R1 */
1323 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1324 BPF_MOV64_IMM(BPF_REG_4
, 8),
1325 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1326 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8), /* fill ctx into R0 */
1327 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8), /* fill ctx into R2 */
1328 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0), /* fill data into R3 */
1329 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
1330 offsetof(struct __sk_buff
, mark
)),
1331 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
1332 offsetof(struct __sk_buff
, priority
)),
1333 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
1334 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
1338 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1341 "raw_stack: skb_load_bytes, invalid access 1",
1343 BPF_MOV64_IMM(BPF_REG_2
, 4),
1344 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1345 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -513),
1346 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1347 BPF_MOV64_IMM(BPF_REG_4
, 8),
1348 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1349 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1353 .errstr
= "invalid stack type R3 off=-513 access_size=8",
1354 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1357 "raw_stack: skb_load_bytes, invalid access 2",
1359 BPF_MOV64_IMM(BPF_REG_2
, 4),
1360 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1361 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
1362 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1363 BPF_MOV64_IMM(BPF_REG_4
, 8),
1364 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1365 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1369 .errstr
= "invalid stack type R3 off=-1 access_size=8",
1370 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1373 "raw_stack: skb_load_bytes, invalid access 3",
1375 BPF_MOV64_IMM(BPF_REG_2
, 4),
1376 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1377 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 0xffffffff),
1378 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1379 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
1380 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1381 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1385 .errstr
= "invalid stack type R3 off=-1 access_size=-1",
1386 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1389 "raw_stack: skb_load_bytes, invalid access 4",
1391 BPF_MOV64_IMM(BPF_REG_2
, 4),
1392 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1393 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
1394 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1395 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
1396 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1397 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1401 .errstr
= "invalid stack type R3 off=-1 access_size=2147483647",
1402 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1405 "raw_stack: skb_load_bytes, invalid access 5",
1407 BPF_MOV64_IMM(BPF_REG_2
, 4),
1408 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1409 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
1410 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1411 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
1412 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1413 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1417 .errstr
= "invalid stack type R3 off=-512 access_size=2147483647",
1418 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1421 "raw_stack: skb_load_bytes, invalid access 6",
1423 BPF_MOV64_IMM(BPF_REG_2
, 4),
1424 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1425 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
1426 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1427 BPF_MOV64_IMM(BPF_REG_4
, 0),
1428 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1429 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1433 .errstr
= "invalid stack type R3 off=-512 access_size=0",
1434 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1437 "raw_stack: skb_load_bytes, large access",
1439 BPF_MOV64_IMM(BPF_REG_2
, 4),
1440 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1441 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
1442 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1443 BPF_MOV64_IMM(BPF_REG_4
, 512),
1444 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_skb_load_bytes
),
1445 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1449 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
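	/* The remaining entries load skb->data/skb->data_end and perform
	 * direct packet accesses, which must be preceded by explicit bounds
	 * checks against data_end.
	 */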
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,

	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
	BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
	BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
	BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,

	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr = "invalid bpf_context access off=76",
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,

	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	.errstr = "cannot write",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
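};

/*
 * probe_filter_length() scans an entry's insns[] backwards for the last
 * non-zero instruction, so each test program does not need to record its
 * own length.
 */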
static int probe_filter_length(struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}
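/*
 * Helper maps: tests that reference a map via BPF_LD_MAP_FD(..., 0) record
 * the instruction index in .fixup/.prog_array_fixup, and the harness patches
 * in a real file descriptor created by these helpers before loading.
 */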
static int create_map(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
				sizeof(long long), sizeof(long long), 1024, 0);
	if (map_fd < 0)
		printf("failed to create map '%s'\n", strerror(errno));

	return map_fd;
}
static int create_prog_array(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
				sizeof(int), sizeof(int), 4, 0);
	if (map_fd < 0)
		printf("failed to create prog_array '%s'\n", strerror(errno));

	return map_fd;
}
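/*
 * test() loads every program in tests[] and compares the verifier verdict
 * and log output against the expected result, using the *_unpriv
 * expectations when running without root privileges.
 */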
static int test(void)
{
	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
	bool unpriv = geteuid() != 0;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_type = tests[i].prog_type;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int *prog_array_fixup = tests[i].prog_array_fixup;
		int expected_result;
		const char *expected_errstr;
		int map_fd = -1, prog_array_fd = -1;

		if (*fixup) {
			map_fd = create_map();

			do {
				prog[*fixup].imm = map_fd;
				fixup++;
			} while (*fixup);
		}
		if (*prog_array_fixup) {
			prog_array_fd = create_prog_array();

			do {
				prog[*prog_array_fixup].imm = prog_array_fd;
				prog_array_fixup++;
			} while (*prog_array_fixup);
		}
		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
					prog, prog_len * sizeof(struct bpf_insn),
					"GPL", 0);

		if (unpriv && tests[i].result_unpriv != UNDEF)
			expected_result = tests[i].result_unpriv;
		else
			expected_result = tests[i].result;

		if (unpriv && tests[i].errstr_unpriv)
			expected_errstr = tests[i].errstr_unpriv;
		else
			expected_errstr = tests[i].errstr;

		if (expected_result == ACCEPT) {
			if (prog_fd < 0) {
				printf("FAIL\nfailed to load prog '%s'\n",
				       strerror(errno));
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		} else {
			if (prog_fd >= 0) {
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
			if (strstr(bpf_log_buf, expected_errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",
				       bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		}

		pass_cnt++;
		printf("OK\n");
fail:
		if (map_fd >= 0)
			close(map_fd);
		if (prog_array_fd >= 0)
			close(prog_array_fd);
		close(prog_fd);
	}
	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);

	return 0;
}
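/*
 * BPF maps and programs are charged against RLIMIT_MEMLOCK, so raise the
 * limit before running the tests.
 */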
int main(void)
{
	struct rlimit r = {1 << 20, 1 << 20};

	setrlimit(RLIMIT_MEMLOCK, &r);

	return test();
}