samples/bpf/test_verifier.c
1 /*
2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10 #include <stdio.h>
11 #include <unistd.h>
12 #include <linux/bpf.h>
13 #include <errno.h>
14 #include <linux/unistd.h>
15 #include <string.h>
16 #include <linux/filter.h>
17 #include <stddef.h>
18 #include <stdbool.h>
19 #include <sys/resource.h>
20 #include "libbpf.h"
22 #define MAX_INSNS 512
23 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
25 #define MAX_FIXUPS 8
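/*
 * One verifier test case: a short eBPF program plus the expected verdict.
 * fixup[] lists instruction indices whose imm field gets patched with a
 * hash map fd (prog_array_fixup[] likewise with a prog array fd), while
 * errstr/errstr_unpriv give the substring expected in the verifier log.
 * The *_unpriv variants apply when the test runs without root.
 */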
27 struct bpf_test {
28 const char *descr;
29 struct bpf_insn insns[MAX_INSNS];
30 int fixup[MAX_FIXUPS];
31 int prog_array_fixup[MAX_FIXUPS];
32 const char *errstr;
33 const char *errstr_unpriv;
34 enum {
35 UNDEF,
36 ACCEPT,
37 REJECT
38 } result, result_unpriv;
39 enum bpf_prog_type prog_type;
42 static struct bpf_test tests[] = {
44 "add+sub+mul",
45 .insns = {
46 BPF_MOV64_IMM(BPF_REG_1, 1),
47 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
48 BPF_MOV64_IMM(BPF_REG_2, 3),
49 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
50 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
51 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
52 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
53 BPF_EXIT_INSN(),
55 .result = ACCEPT,
58 "unreachable",
59 .insns = {
60 BPF_EXIT_INSN(),
61 BPF_EXIT_INSN(),
63 .errstr = "unreachable",
64 .result = REJECT,
67 "unreachable2",
68 .insns = {
69 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
70 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
71 BPF_EXIT_INSN(),
73 .errstr = "unreachable",
74 .result = REJECT,
77 "out of range jump",
78 .insns = {
79 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
80 BPF_EXIT_INSN(),
82 .errstr = "jump out of range",
83 .result = REJECT,
86 "out of range jump2",
87 .insns = {
88 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
89 BPF_EXIT_INSN(),
91 .errstr = "jump out of range",
92 .result = REJECT,
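/*
 * BPF_LD_IMM64 is a two-instruction (16 byte) encoding; the ld_imm64
 * tests check that truncated or malformed second halves are rejected.
 */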
95 "test1 ld_imm64",
96 .insns = {
97 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
98 BPF_LD_IMM64(BPF_REG_0, 0),
99 BPF_LD_IMM64(BPF_REG_0, 0),
100 BPF_LD_IMM64(BPF_REG_0, 1),
101 BPF_LD_IMM64(BPF_REG_0, 1),
102 BPF_MOV64_IMM(BPF_REG_0, 2),
103 BPF_EXIT_INSN(),
105 .errstr = "invalid BPF_LD_IMM insn",
106 .errstr_unpriv = "R1 pointer comparison",
107 .result = REJECT,
110 "test2 ld_imm64",
111 .insns = {
112 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
113 BPF_LD_IMM64(BPF_REG_0, 0),
114 BPF_LD_IMM64(BPF_REG_0, 0),
115 BPF_LD_IMM64(BPF_REG_0, 1),
116 BPF_LD_IMM64(BPF_REG_0, 1),
117 BPF_EXIT_INSN(),
119 .errstr = "invalid BPF_LD_IMM insn",
120 .errstr_unpriv = "R1 pointer comparison",
121 .result = REJECT,
124 "test3 ld_imm64",
125 .insns = {
126 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
127 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
128 BPF_LD_IMM64(BPF_REG_0, 0),
129 BPF_LD_IMM64(BPF_REG_0, 0),
130 BPF_LD_IMM64(BPF_REG_0, 1),
131 BPF_LD_IMM64(BPF_REG_0, 1),
132 BPF_EXIT_INSN(),
134 .errstr = "invalid bpf_ld_imm64 insn",
135 .result = REJECT,
138 "test4 ld_imm64",
139 .insns = {
140 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
141 BPF_EXIT_INSN(),
143 .errstr = "invalid bpf_ld_imm64 insn",
144 .result = REJECT,
147 "test5 ld_imm64",
148 .insns = {
149 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
151 .errstr = "invalid bpf_ld_imm64 insn",
152 .result = REJECT,
155 "no bpf_exit",
156 .insns = {
157 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
159 .errstr = "jump out of range",
160 .result = REJECT,
163 "loop (back-edge)",
164 .insns = {
165 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
166 BPF_EXIT_INSN(),
168 .errstr = "back-edge",
169 .result = REJECT,
172 "loop2 (back-edge)",
173 .insns = {
174 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
175 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
176 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
177 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
178 BPF_EXIT_INSN(),
180 .errstr = "back-edge",
181 .result = REJECT,
184 "conditional loop",
185 .insns = {
186 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
188 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
189 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
190 BPF_EXIT_INSN(),
192 .errstr = "back-edge",
193 .result = REJECT,
196 "read uninitialized register",
197 .insns = {
198 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
199 BPF_EXIT_INSN(),
201 .errstr = "R2 !read_ok",
202 .result = REJECT,
205 "read invalid register",
206 .insns = {
207 BPF_MOV64_REG(BPF_REG_0, -1),
208 BPF_EXIT_INSN(),
210 .errstr = "R15 is invalid",
211 .result = REJECT,
214 "program doesn't init R0 before exit",
215 .insns = {
216 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
217 BPF_EXIT_INSN(),
219 .errstr = "R0 !read_ok",
220 .result = REJECT,
223 "program doesn't init R0 before exit in all branches",
224 .insns = {
225 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
226 BPF_MOV64_IMM(BPF_REG_0, 1),
227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
228 BPF_EXIT_INSN(),
230 .errstr = "R0 !read_ok",
231 .errstr_unpriv = "R1 pointer comparison",
232 .result = REJECT,
235 "stack out of bounds",
236 .insns = {
237 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
238 BPF_EXIT_INSN(),
240 .errstr = "invalid stack",
241 .result = REJECT,
244 "invalid call insn1",
245 .insns = {
246 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
247 BPF_EXIT_INSN(),
249 .errstr = "BPF_CALL uses reserved",
250 .result = REJECT,
253 "invalid call insn2",
254 .insns = {
255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
256 BPF_EXIT_INSN(),
258 .errstr = "BPF_CALL uses reserved",
259 .result = REJECT,
262 "invalid function call",
263 .insns = {
264 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
265 BPF_EXIT_INSN(),
267 .errstr = "invalid func 1234567",
268 .result = REJECT,
271 "uninitialized stack1",
272 .insns = {
273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
275 BPF_LD_MAP_FD(BPF_REG_1, 0),
276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
277 BPF_EXIT_INSN(),
279 .fixup = {2},
280 .errstr = "invalid indirect read from stack",
281 .result = REJECT,
284 "uninitialized stack2",
285 .insns = {
286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
287 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
288 BPF_EXIT_INSN(),
290 .errstr = "invalid read from stack",
291 .result = REJECT,
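/*
 * Spill/fill tests: the verifier tracks pointer registers spilled to the
 * stack and must reject fills from slots that were partially overwritten.
 */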
294 "check valid spill/fill",
295 .insns = {
296 /* spill R1(ctx) into stack */
297 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
299 /* fill it back into R2 */
300 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
302 /* should be able to access R0 = *(R2 + 8) */
303 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
304 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
305 BPF_EXIT_INSN(),
307 .errstr_unpriv = "R0 leaks addr",
308 .result = ACCEPT,
309 .result_unpriv = REJECT,
312 "check valid spill/fill, skb mark",
313 .insns = {
314 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
315 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
316 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
317 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
318 offsetof(struct __sk_buff, mark)),
319 BPF_EXIT_INSN(),
321 .result = ACCEPT,
322 .result_unpriv = ACCEPT,
325 "check corrupted spill/fill",
326 .insns = {
327 /* spill R1(ctx) into stack */
328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
330 /* mess up with R1 pointer on stack */
331 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
333 /* fill back into R0 should fail */
334 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
336 BPF_EXIT_INSN(),
338 .errstr_unpriv = "attempt to corrupt spilled",
339 .errstr = "corrupted spill",
340 .result = REJECT,
343 "invalid src register in STX",
344 .insns = {
345 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
346 BPF_EXIT_INSN(),
348 .errstr = "R15 is invalid",
349 .result = REJECT,
352 "invalid dst register in STX",
353 .insns = {
354 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
355 BPF_EXIT_INSN(),
357 .errstr = "R14 is invalid",
358 .result = REJECT,
361 "invalid dst register in ST",
362 .insns = {
363 BPF_ST_MEM(BPF_B, 14, -1, -1),
364 BPF_EXIT_INSN(),
366 .errstr = "R14 is invalid",
367 .result = REJECT,
370 "invalid src register in LDX",
371 .insns = {
372 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
373 BPF_EXIT_INSN(),
375 .errstr = "R12 is invalid",
376 .result = REJECT,
379 "invalid dst register in LDX",
380 .insns = {
381 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
382 BPF_EXIT_INSN(),
384 .errstr = "R11 is invalid",
385 .result = REJECT,
388 "junk insn",
389 .insns = {
390 BPF_RAW_INSN(0, 0, 0, 0, 0),
391 BPF_EXIT_INSN(),
393 .errstr = "invalid BPF_LD_IMM",
394 .result = REJECT,
397 "junk insn2",
398 .insns = {
399 BPF_RAW_INSN(1, 0, 0, 0, 0),
400 BPF_EXIT_INSN(),
402 .errstr = "BPF_LDX uses reserved fields",
403 .result = REJECT,
406 "junk insn3",
407 .insns = {
408 BPF_RAW_INSN(-1, 0, 0, 0, 0),
409 BPF_EXIT_INSN(),
411 .errstr = "invalid BPF_ALU opcode f0",
412 .result = REJECT,
415 "junk insn4",
416 .insns = {
417 BPF_RAW_INSN(-1, -1, -1, -1, -1),
418 BPF_EXIT_INSN(),
420 .errstr = "invalid BPF_ALU opcode f0",
421 .result = REJECT,
424 "junk insn5",
425 .insns = {
426 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
427 BPF_EXIT_INSN(),
429 .errstr = "BPF_ALU uses reserved fields",
430 .result = REJECT,
433 "misaligned read from stack",
434 .insns = {
435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
436 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
437 BPF_EXIT_INSN(),
439 .errstr = "misaligned access",
440 .result = REJECT,
443 "invalid map_fd for function call",
444 .insns = {
445 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
446 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
448 BPF_LD_MAP_FD(BPF_REG_1, 0),
449 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
450 BPF_EXIT_INSN(),
452 .errstr = "fd 0 is not pointing to valid bpf_map",
453 .result = REJECT,
456 "don't check return value before access",
457 .insns = {
458 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
459 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
461 BPF_LD_MAP_FD(BPF_REG_1, 0),
462 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
463 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
464 BPF_EXIT_INSN(),
466 .fixup = {3},
467 .errstr = "R0 invalid mem access 'map_value_or_null'",
468 .result = REJECT,
471 "access memory with incorrect alignment",
472 .insns = {
473 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
474 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
476 BPF_LD_MAP_FD(BPF_REG_1, 0),
477 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
478 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
479 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
480 BPF_EXIT_INSN(),
482 .fixup = {3},
483 .errstr = "misaligned access",
484 .result = REJECT,
487 "sometimes access memory with incorrect alignment",
488 .insns = {
489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
490 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
492 BPF_LD_MAP_FD(BPF_REG_1, 0),
493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
494 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
495 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
496 BPF_EXIT_INSN(),
497 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
498 BPF_EXIT_INSN(),
500 .fixup = {3},
501 .errstr = "R0 invalid mem access",
502 .errstr_unpriv = "R0 leaks addr",
503 .result = REJECT,
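/* Jump tests: branch-heavy programs where every path must still verify. */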
506 "jump test 1",
507 .insns = {
508 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
509 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
510 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
511 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
512 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
513 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
514 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
515 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
516 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
517 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
518 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
519 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
520 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
521 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
522 BPF_MOV64_IMM(BPF_REG_0, 0),
523 BPF_EXIT_INSN(),
525 .errstr_unpriv = "R1 pointer comparison",
526 .result_unpriv = REJECT,
527 .result = ACCEPT,
530 "jump test 2",
531 .insns = {
532 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
534 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
535 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
536 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
537 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
538 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
539 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
540 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
541 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
542 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
543 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
544 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
545 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
546 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
547 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
548 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
549 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
550 BPF_MOV64_IMM(BPF_REG_0, 0),
551 BPF_EXIT_INSN(),
553 .errstr_unpriv = "R1 pointer comparison",
554 .result_unpriv = REJECT,
555 .result = ACCEPT,
558 "jump test 3",
559 .insns = {
560 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
561 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
562 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
564 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
565 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
566 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
568 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
569 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
570 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
572 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
574 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
576 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
577 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
578 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
580 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
582 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
584 BPF_LD_MAP_FD(BPF_REG_1, 0),
585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
586 BPF_EXIT_INSN(),
588 .fixup = {24},
589 .errstr_unpriv = "R1 pointer comparison",
590 .result_unpriv = REJECT,
591 .result = ACCEPT,
594 "jump test 4",
595 .insns = {
596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
597 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
598 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
599 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
600 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
601 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
602 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
603 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
605 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
606 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
607 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
608 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
611 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
612 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
613 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
614 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
615 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
616 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
623 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
624 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
625 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
626 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
627 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
628 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
630 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
631 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
632 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
633 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
634 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
635 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
636 BPF_MOV64_IMM(BPF_REG_0, 0),
637 BPF_EXIT_INSN(),
639 .errstr_unpriv = "R1 pointer comparison",
640 .result_unpriv = REJECT,
641 .result = ACCEPT,
644 "jump test 5",
645 .insns = {
646 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
647 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
648 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
649 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
650 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
651 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
652 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
653 BPF_MOV64_IMM(BPF_REG_0, 0),
654 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
655 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
656 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
657 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
658 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
659 BPF_MOV64_IMM(BPF_REG_0, 0),
660 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
661 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
662 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
663 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
664 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
665 BPF_MOV64_IMM(BPF_REG_0, 0),
666 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
667 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
668 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
669 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
670 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
671 BPF_MOV64_IMM(BPF_REG_0, 0),
672 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
673 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
674 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
675 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
676 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
677 BPF_MOV64_IMM(BPF_REG_0, 0),
678 BPF_EXIT_INSN(),
680 .errstr_unpriv = "R1 pointer comparison",
681 .result_unpriv = REJECT,
682 .result = ACCEPT,
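/*
 * __sk_buff access tests: only aligned, 4-byte loads of the allowed
 * fields count as valid bpf_context accesses for socket filters.
 */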
685 "access skb fields ok",
686 .insns = {
687 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
688 offsetof(struct __sk_buff, len)),
689 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
690 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
691 offsetof(struct __sk_buff, mark)),
692 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
693 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
694 offsetof(struct __sk_buff, pkt_type)),
695 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
696 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
697 offsetof(struct __sk_buff, queue_mapping)),
698 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
699 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
700 offsetof(struct __sk_buff, protocol)),
701 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
702 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
703 offsetof(struct __sk_buff, vlan_present)),
704 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
705 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
706 offsetof(struct __sk_buff, vlan_tci)),
707 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
708 BPF_EXIT_INSN(),
710 .result = ACCEPT,
713 "access skb fields bad1",
714 .insns = {
715 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
716 BPF_EXIT_INSN(),
718 .errstr = "invalid bpf_context access",
719 .result = REJECT,
722 "access skb fields bad2",
723 .insns = {
724 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
725 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
726 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
728 BPF_LD_MAP_FD(BPF_REG_1, 0),
729 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
730 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
731 BPF_EXIT_INSN(),
732 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
733 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
734 offsetof(struct __sk_buff, pkt_type)),
735 BPF_EXIT_INSN(),
737 .fixup = {4},
738 .errstr = "different pointers",
739 .errstr_unpriv = "R1 pointer comparison",
740 .result = REJECT,
743 "access skb fields bad3",
744 .insns = {
745 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
746 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
747 offsetof(struct __sk_buff, pkt_type)),
748 BPF_EXIT_INSN(),
749 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
750 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
752 BPF_LD_MAP_FD(BPF_REG_1, 0),
753 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
754 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
755 BPF_EXIT_INSN(),
756 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
757 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
759 .fixup = {6},
760 .errstr = "different pointers",
761 .errstr_unpriv = "R1 pointer comparison",
762 .result = REJECT,
765 "access skb fields bad4",
766 .insns = {
767 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
768 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
769 offsetof(struct __sk_buff, len)),
770 BPF_MOV64_IMM(BPF_REG_0, 0),
771 BPF_EXIT_INSN(),
772 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
773 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
775 BPF_LD_MAP_FD(BPF_REG_1, 0),
776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
777 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
778 BPF_EXIT_INSN(),
779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
780 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
782 .fixup = {7},
783 .errstr = "different pointers",
784 .errstr_unpriv = "R1 pointer comparison",
785 .result = REJECT,
788 "check skb->mark is not writeable by sockets",
789 .insns = {
790 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
791 offsetof(struct __sk_buff, mark)),
792 BPF_EXIT_INSN(),
794 .errstr = "invalid bpf_context access",
795 .errstr_unpriv = "R1 leaks addr",
796 .result = REJECT,
799 "check skb->tc_index is not writeable by sockets",
800 .insns = {
801 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
802 offsetof(struct __sk_buff, tc_index)),
803 BPF_EXIT_INSN(),
805 .errstr = "invalid bpf_context access",
806 .errstr_unpriv = "R1 leaks addr",
807 .result = REJECT,
810 "check non-u32 access to cb",
811 .insns = {
812 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
813 offsetof(struct __sk_buff, cb[0])),
814 BPF_EXIT_INSN(),
816 .errstr = "invalid bpf_context access",
817 .errstr_unpriv = "R1 leaks addr",
818 .result = REJECT,
821 "check out of range skb->cb access",
822 .insns = {
823 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
824 offsetof(struct __sk_buff, cb[0]) + 256),
825 BPF_EXIT_INSN(),
827 .errstr = "invalid bpf_context access",
828 .errstr_unpriv = "",
829 .result = REJECT,
830 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
833 "write skb fields from socket prog",
834 .insns = {
835 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
836 offsetof(struct __sk_buff, cb[4])),
837 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
838 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
839 offsetof(struct __sk_buff, mark)),
840 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
841 offsetof(struct __sk_buff, tc_index)),
842 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
843 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
844 offsetof(struct __sk_buff, cb[0])),
845 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
846 offsetof(struct __sk_buff, cb[2])),
847 BPF_EXIT_INSN(),
849 .result = ACCEPT,
850 .errstr_unpriv = "R1 leaks addr",
851 .result_unpriv = REJECT,
854 "write skb fields from tc_cls_act prog",
855 .insns = {
856 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
857 offsetof(struct __sk_buff, cb[0])),
858 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
859 offsetof(struct __sk_buff, mark)),
860 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
861 offsetof(struct __sk_buff, tc_index)),
862 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
863 offsetof(struct __sk_buff, tc_index)),
864 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
865 offsetof(struct __sk_buff, cb[3])),
866 BPF_EXIT_INSN(),
868 .errstr_unpriv = "",
869 .result_unpriv = REJECT,
870 .result = ACCEPT,
871 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
874 "PTR_TO_STACK store/load",
875 .insns = {
876 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
878 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
879 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
880 BPF_EXIT_INSN(),
882 .result = ACCEPT,
885 "PTR_TO_STACK store/load - bad alignment on off",
886 .insns = {
887 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
889 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
890 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
891 BPF_EXIT_INSN(),
893 .result = REJECT,
894 .errstr = "misaligned access off -6 size 8",
897 "PTR_TO_STACK store/load - bad alignment on reg",
898 .insns = {
899 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
901 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
902 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
903 BPF_EXIT_INSN(),
905 .result = REJECT,
906 .errstr = "misaligned access off -2 size 8",
909 "PTR_TO_STACK store/load - out of bounds low",
910 .insns = {
911 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
913 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
914 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
915 BPF_EXIT_INSN(),
917 .result = REJECT,
918 .errstr = "invalid stack off=-79992 size=8",
921 "PTR_TO_STACK store/load - out of bounds high",
922 .insns = {
923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
925 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
926 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
927 BPF_EXIT_INSN(),
929 .result = REJECT,
930 .errstr = "invalid stack off=0 size=8",
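/*
 * "unpriv:" tests cover the extra restrictions enforced when a program is
 * loaded without CAP_SYS_ADMIN: leaking kernel pointers, pointer
 * arithmetic/comparisons and tracing helpers are rejected for
 * unprivileged users but remain acceptable for root.
 */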
933 "unpriv: return pointer",
934 .insns = {
935 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
936 BPF_EXIT_INSN(),
938 .result = ACCEPT,
939 .result_unpriv = REJECT,
940 .errstr_unpriv = "R0 leaks addr",
943 "unpriv: add const to pointer",
944 .insns = {
945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
946 BPF_MOV64_IMM(BPF_REG_0, 0),
947 BPF_EXIT_INSN(),
949 .result = ACCEPT,
950 .result_unpriv = REJECT,
951 .errstr_unpriv = "R1 pointer arithmetic",
954 "unpriv: add pointer to pointer",
955 .insns = {
956 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
957 BPF_MOV64_IMM(BPF_REG_0, 0),
958 BPF_EXIT_INSN(),
960 .result = ACCEPT,
961 .result_unpriv = REJECT,
962 .errstr_unpriv = "R1 pointer arithmetic",
965 "unpriv: neg pointer",
966 .insns = {
967 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
968 BPF_MOV64_IMM(BPF_REG_0, 0),
969 BPF_EXIT_INSN(),
971 .result = ACCEPT,
972 .result_unpriv = REJECT,
973 .errstr_unpriv = "R1 pointer arithmetic",
976 "unpriv: cmp pointer with const",
977 .insns = {
978 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
979 BPF_MOV64_IMM(BPF_REG_0, 0),
980 BPF_EXIT_INSN(),
982 .result = ACCEPT,
983 .result_unpriv = REJECT,
984 .errstr_unpriv = "R1 pointer comparison",
987 "unpriv: cmp pointer with pointer",
988 .insns = {
989 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
990 BPF_MOV64_IMM(BPF_REG_0, 0),
991 BPF_EXIT_INSN(),
993 .result = ACCEPT,
994 .result_unpriv = REJECT,
995 .errstr_unpriv = "R10 pointer comparison",
998 "unpriv: check that printk is disallowed",
999 .insns = {
1000 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1001 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1003 BPF_MOV64_IMM(BPF_REG_2, 8),
1004 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
1006 BPF_MOV64_IMM(BPF_REG_0, 0),
1007 BPF_EXIT_INSN(),
1009 .errstr_unpriv = "unknown func 6",
1010 .result_unpriv = REJECT,
1011 .result = ACCEPT,
1014 "unpriv: pass pointer to helper function",
1015 .insns = {
1016 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1017 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1019 BPF_LD_MAP_FD(BPF_REG_1, 0),
1020 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1021 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1022 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
1023 BPF_MOV64_IMM(BPF_REG_0, 0),
1024 BPF_EXIT_INSN(),
1026 .fixup = {3},
1027 .errstr_unpriv = "R4 leaks addr",
1028 .result_unpriv = REJECT,
1029 .result = ACCEPT,
1032 "unpriv: indirectly pass pointer on stack to helper function",
1033 .insns = {
1034 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1035 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1037 BPF_LD_MAP_FD(BPF_REG_1, 0),
1038 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1039 BPF_MOV64_IMM(BPF_REG_0, 0),
1040 BPF_EXIT_INSN(),
1042 .fixup = {3},
1043 .errstr = "invalid indirect read from stack off -8+0 size 8",
1044 .result = REJECT,
1047 "unpriv: mangle pointer on stack 1",
1048 .insns = {
1049 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1050 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1051 BPF_MOV64_IMM(BPF_REG_0, 0),
1052 BPF_EXIT_INSN(),
1054 .errstr_unpriv = "attempt to corrupt spilled",
1055 .result_unpriv = REJECT,
1056 .result = ACCEPT,
1059 "unpriv: mangle pointer on stack 2",
1060 .insns = {
1061 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1062 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1063 BPF_MOV64_IMM(BPF_REG_0, 0),
1064 BPF_EXIT_INSN(),
1066 .errstr_unpriv = "attempt to corrupt spilled",
1067 .result_unpriv = REJECT,
1068 .result = ACCEPT,
1071 "unpriv: read pointer from stack in small chunks",
1072 .insns = {
1073 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1074 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1075 BPF_MOV64_IMM(BPF_REG_0, 0),
1076 BPF_EXIT_INSN(),
1078 .errstr = "invalid size",
1079 .result = REJECT,
1082 "unpriv: write pointer into ctx",
1083 .insns = {
1084 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1085 BPF_MOV64_IMM(BPF_REG_0, 0),
1086 BPF_EXIT_INSN(),
1088 .errstr_unpriv = "R1 leaks addr",
1089 .result_unpriv = REJECT,
1090 .errstr = "invalid bpf_context access",
1091 .result = REJECT,
1094 "unpriv: write pointer into map elem value",
1095 .insns = {
1096 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1097 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1099 BPF_LD_MAP_FD(BPF_REG_1, 0),
1100 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1101 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1102 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1103 BPF_EXIT_INSN(),
1105 .fixup = {3},
1106 .errstr_unpriv = "R0 leaks addr",
1107 .result_unpriv = REJECT,
1108 .result = ACCEPT,
1111 "unpriv: partial copy of pointer",
1112 .insns = {
1113 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1114 BPF_MOV64_IMM(BPF_REG_0, 0),
1115 BPF_EXIT_INSN(),
1117 .errstr_unpriv = "R10 partial copy",
1118 .result_unpriv = REJECT,
1119 .result = ACCEPT,
1122 "unpriv: pass pointer to tail_call",
1123 .insns = {
1124 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1125 BPF_LD_MAP_FD(BPF_REG_2, 0),
1126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
1127 BPF_MOV64_IMM(BPF_REG_0, 0),
1128 BPF_EXIT_INSN(),
1130 .prog_array_fixup = {1},
1131 .errstr_unpriv = "R3 leaks addr into helper",
1132 .result_unpriv = REJECT,
1133 .result = ACCEPT,
1136 "unpriv: cmp map pointer with zero",
1137 .insns = {
1138 BPF_MOV64_IMM(BPF_REG_1, 0),
1139 BPF_LD_MAP_FD(BPF_REG_1, 0),
1140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1141 BPF_MOV64_IMM(BPF_REG_0, 0),
1142 BPF_EXIT_INSN(),
1144 .fixup = {1},
1145 .errstr_unpriv = "R1 pointer comparison",
1146 .result_unpriv = REJECT,
1147 .result = ACCEPT,
1150 "unpriv: write into frame pointer",
1151 .insns = {
1152 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1153 BPF_MOV64_IMM(BPF_REG_0, 0),
1154 BPF_EXIT_INSN(),
1156 .errstr = "frame pointer is read only",
1157 .result = REJECT,
1160 "unpriv: cmp of frame pointer",
1161 .insns = {
1162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1163 BPF_MOV64_IMM(BPF_REG_0, 0),
1164 BPF_EXIT_INSN(),
1166 .errstr_unpriv = "R10 pointer comparison",
1167 .result_unpriv = REJECT,
1168 .result = ACCEPT,
1171 "unpriv: cmp of stack pointer",
1172 .insns = {
1173 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1175 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1176 BPF_MOV64_IMM(BPF_REG_0, 0),
1177 BPF_EXIT_INSN(),
1179 .errstr_unpriv = "R2 pointer comparison",
1180 .result_unpriv = REJECT,
1181 .result = ACCEPT,
1184 "unpriv: obfuscate stack pointer",
1185 .insns = {
1186 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1189 BPF_MOV64_IMM(BPF_REG_0, 0),
1190 BPF_EXIT_INSN(),
1192 .errstr_unpriv = "R2 pointer arithmetic",
1193 .result_unpriv = REJECT,
1194 .result = ACCEPT,
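/*
 * raw_stack tests: bpf_skb_load_bytes() may target uninitialized stack
 * memory since it overwrites the whole buffer, but the buffer must still
 * be a valid, in-bounds stack area, and direct reads of uninitialized or
 * clobbered spill slots stay invalid.
 */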
1197 "raw_stack: no skb_load_bytes",
1198 .insns = {
1199 BPF_MOV64_IMM(BPF_REG_2, 4),
1200 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1202 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1203 BPF_MOV64_IMM(BPF_REG_4, 8),
1204 /* Call to skb_load_bytes() omitted. */
1205 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1206 BPF_EXIT_INSN(),
1208 .result = REJECT,
1209 .errstr = "invalid read from stack off -8+0 size 8",
1210 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1213 "raw_stack: skb_load_bytes, no init",
1214 .insns = {
1215 BPF_MOV64_IMM(BPF_REG_2, 4),
1216 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1218 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1219 BPF_MOV64_IMM(BPF_REG_4, 8),
1220 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1221 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1222 BPF_EXIT_INSN(),
1224 .result = ACCEPT,
1225 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1228 "raw_stack: skb_load_bytes, init",
1229 .insns = {
1230 BPF_MOV64_IMM(BPF_REG_2, 4),
1231 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1233 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
1234 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1235 BPF_MOV64_IMM(BPF_REG_4, 8),
1236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1237 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1238 BPF_EXIT_INSN(),
1240 .result = ACCEPT,
1241 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1244 "raw_stack: skb_load_bytes, spilled regs around bounds",
1245 .insns = {
1246 BPF_MOV64_IMM(BPF_REG_2, 4),
1247 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1249 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
1250 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
1251 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1252 BPF_MOV64_IMM(BPF_REG_4, 8),
1253 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1254 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
1255 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
1256 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1257 offsetof(struct __sk_buff, mark)),
1258 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1259 offsetof(struct __sk_buff, priority)),
1260 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1261 BPF_EXIT_INSN(),
1263 .result = ACCEPT,
1264 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1267 "raw_stack: skb_load_bytes, spilled regs corruption",
1268 .insns = {
1269 BPF_MOV64_IMM(BPF_REG_2, 4),
1270 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1272 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
1273 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1274 BPF_MOV64_IMM(BPF_REG_4, 8),
1275 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1276 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), /* fill ctx into R0 */
1277 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1278 offsetof(struct __sk_buff, mark)),
1279 BPF_EXIT_INSN(),
1281 .result = REJECT,
1282 .errstr = "R0 invalid mem access 'inv'",
1283 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1286 "raw_stack: skb_load_bytes, spilled regs corruption 2",
1287 .insns = {
1288 BPF_MOV64_IMM(BPF_REG_2, 4),
1289 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1291 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
1292 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
1293 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
1294 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1295 BPF_MOV64_IMM(BPF_REG_4, 8),
1296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1297 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
1298 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
1299 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill ctx into R3 */
1300 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1301 offsetof(struct __sk_buff, mark)),
1302 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1303 offsetof(struct __sk_buff, priority)),
1304 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1305 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
1306 offsetof(struct __sk_buff, pkt_type)),
1307 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
1308 BPF_EXIT_INSN(),
1310 .result = REJECT,
1311 .errstr = "R3 invalid mem access 'inv'",
1312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1315 "raw_stack: skb_load_bytes, spilled regs + data",
1316 .insns = {
1317 BPF_MOV64_IMM(BPF_REG_2, 4),
1318 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1320 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
1321 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
1322 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
1323 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1324 BPF_MOV64_IMM(BPF_REG_4, 8),
1325 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1326 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
1327 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
1328 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill data into R3 */
1329 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1330 offsetof(struct __sk_buff, mark)),
1331 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1332 offsetof(struct __sk_buff, priority)),
1333 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1334 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
1335 BPF_EXIT_INSN(),
1337 .result = ACCEPT,
1338 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1341 "raw_stack: skb_load_bytes, invalid access 1",
1342 .insns = {
1343 BPF_MOV64_IMM(BPF_REG_2, 4),
1344 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
1346 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1347 BPF_MOV64_IMM(BPF_REG_4, 8),
1348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1349 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1350 BPF_EXIT_INSN(),
1352 .result = REJECT,
1353 .errstr = "invalid stack type R3 off=-513 access_size=8",
1354 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1357 "raw_stack: skb_load_bytes, invalid access 2",
1358 .insns = {
1359 BPF_MOV64_IMM(BPF_REG_2, 4),
1360 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
1362 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1363 BPF_MOV64_IMM(BPF_REG_4, 8),
1364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1366 BPF_EXIT_INSN(),
1368 .result = REJECT,
1369 .errstr = "invalid stack type R3 off=-1 access_size=8",
1370 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1373 "raw_stack: skb_load_bytes, invalid access 3",
1374 .insns = {
1375 BPF_MOV64_IMM(BPF_REG_2, 4),
1376 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
1378 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1379 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
1380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1381 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1382 BPF_EXIT_INSN(),
1384 .result = REJECT,
1385 .errstr = "invalid stack type R3 off=-1 access_size=-1",
1386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1389 "raw_stack: skb_load_bytes, invalid access 4",
1390 .insns = {
1391 BPF_MOV64_IMM(BPF_REG_2, 4),
1392 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
1394 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1395 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
1396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1397 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1398 BPF_EXIT_INSN(),
1400 .result = REJECT,
1401 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
1402 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1405 "raw_stack: skb_load_bytes, invalid access 5",
1406 .insns = {
1407 BPF_MOV64_IMM(BPF_REG_2, 4),
1408 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
1410 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1411 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
1412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1413 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1414 BPF_EXIT_INSN(),
1416 .result = REJECT,
1417 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
1418 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1421 "raw_stack: skb_load_bytes, invalid access 6",
1422 .insns = {
1423 BPF_MOV64_IMM(BPF_REG_2, 4),
1424 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
1426 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1427 BPF_MOV64_IMM(BPF_REG_4, 0),
1428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1429 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1430 BPF_EXIT_INSN(),
1432 .result = REJECT,
1433 .errstr = "invalid stack type R3 off=-512 access_size=0",
1434 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1437 "raw_stack: skb_load_bytes, large access",
1438 .insns = {
1439 BPF_MOV64_IMM(BPF_REG_2, 4),
1440 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
1442 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1443 BPF_MOV64_IMM(BPF_REG_4, 512),
1444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
1445 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1446 BPF_EXIT_INSN(),
1448 .result = ACCEPT,
1449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
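/*
 * pkt tests: direct packet access via skb->data is only allowed for
 * cls/act programs and only after a bounds check against skb->data_end;
 * writes into packet data are rejected here.
 */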
1452 "pkt: test1",
1453 .insns = {
1454 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1455 offsetof(struct __sk_buff, data)),
1456 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1457 offsetof(struct __sk_buff, data_end)),
1458 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1460 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1461 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1462 BPF_MOV64_IMM(BPF_REG_0, 0),
1463 BPF_EXIT_INSN(),
1465 .result = ACCEPT,
1466 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1469 "pkt: test2",
1470 .insns = {
1471 BPF_MOV64_IMM(BPF_REG_0, 1),
1472 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
1473 offsetof(struct __sk_buff, data_end)),
1474 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1475 offsetof(struct __sk_buff, data)),
1476 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
1477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
1478 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
1479 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
1480 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
1481 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
1482 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1483 offsetof(struct __sk_buff, data)),
1484 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
1485 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
1486 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
1487 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
1488 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
1489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
1490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
1491 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1492 offsetof(struct __sk_buff, data_end)),
1493 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
1494 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
1495 BPF_MOV64_IMM(BPF_REG_0, 0),
1496 BPF_EXIT_INSN(),
1498 .result = ACCEPT,
1499 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1502 "pkt: test3",
1503 .insns = {
1504 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1505 offsetof(struct __sk_buff, data)),
1506 BPF_MOV64_IMM(BPF_REG_0, 0),
1507 BPF_EXIT_INSN(),
1509 .errstr = "invalid bpf_context access off=76",
1510 .result = REJECT,
1511 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1514 "pkt: test4",
1515 .insns = {
1516 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1517 offsetof(struct __sk_buff, data)),
1518 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1519 offsetof(struct __sk_buff, data_end)),
1520 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1522 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1523 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1524 BPF_MOV64_IMM(BPF_REG_0, 0),
1525 BPF_EXIT_INSN(),
1527 .errstr = "cannot write",
1528 .result = REJECT,
1529 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
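/*
 * The insns[] arrays are zero-padded up to MAX_INSNS; scan backwards for
 * the last non-zero instruction to determine the real program length.
 */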
1533 static int probe_filter_length(struct bpf_insn *fp)
1535 int len = 0;
1537 for (len = MAX_INSNS - 1; len > 0; --len)
1538 if (fp[len].code != 0 || fp[len].imm != 0)
1539 break;
1541 return len + 1;
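/* Hash map whose fd gets patched into the instructions listed in fixup[]. */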
1544 static int create_map(void)
1546 int map_fd;
1548 map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
1549 sizeof(long long), sizeof(long long), 1024, 0);
1550 if (map_fd < 0)
1551 printf("failed to create map '%s'\n", strerror(errno));
1553 return map_fd;
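/* Prog array for the tail-call test, patched in via prog_array_fixup[]. */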
1556 static int create_prog_array(void)
1558 int map_fd;
1560 map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
1561 sizeof(int), sizeof(int), 4, 0);
1562 if (map_fd < 0)
1563 printf("failed to create prog_array '%s'\n", strerror(errno));
1565 return map_fd;
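/*
 * Load every test program and compare the verifier's verdict (and, on
 * rejection, its log output) against the expected result. When running
 * without root the *_unpriv expectations take precedence.
 */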
1568 static int test(void)
1570 int prog_fd, i, pass_cnt = 0, err_cnt = 0;
1571 bool unpriv = geteuid() != 0;
1573 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1574 struct bpf_insn *prog = tests[i].insns;
1575 int prog_type = tests[i].prog_type;
1576 int prog_len = probe_filter_length(prog);
1577 int *fixup = tests[i].fixup;
1578 int *prog_array_fixup = tests[i].prog_array_fixup;
1579 int expected_result;
1580 const char *expected_errstr;
1581 int map_fd = -1, prog_array_fd = -1;
1583 if (*fixup) {
1584 map_fd = create_map();
1586 do {
1587 prog[*fixup].imm = map_fd;
1588 fixup++;
1589 } while (*fixup);
1591 if (*prog_array_fixup) {
1592 prog_array_fd = create_prog_array();
1594 do {
1595 prog[*prog_array_fixup].imm = prog_array_fd;
1596 prog_array_fixup++;
1597 } while (*prog_array_fixup);
1599 printf("#%d %s ", i, tests[i].descr);
1601 prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
1602 prog, prog_len * sizeof(struct bpf_insn),
1603 "GPL", 0);
1605 if (unpriv && tests[i].result_unpriv != UNDEF)
1606 expected_result = tests[i].result_unpriv;
1607 else
1608 expected_result = tests[i].result;
1610 if (unpriv && tests[i].errstr_unpriv)
1611 expected_errstr = tests[i].errstr_unpriv;
1612 else
1613 expected_errstr = tests[i].errstr;
1615 if (expected_result == ACCEPT) {
1616 if (prog_fd < 0) {
1617 printf("FAIL\nfailed to load prog '%s'\n",
1618 strerror(errno));
1619 printf("%s", bpf_log_buf);
1620 err_cnt++;
1621 goto fail;
1623 } else {
1624 if (prog_fd >= 0) {
1625 printf("FAIL\nunexpected success to load\n");
1626 printf("%s", bpf_log_buf);
1627 err_cnt++;
1628 goto fail;
1630 if (strstr(bpf_log_buf, expected_errstr) == 0) {
1631 printf("FAIL\nunexpected error message: %s",
1632 bpf_log_buf);
1633 err_cnt++;
1634 goto fail;
1638 pass_cnt++;
1639 printf("OK\n");
1640 fail:
1641 if (map_fd >= 0)
1642 close(map_fd);
1643 if (prog_array_fd >= 0)
1644 close(prog_array_fd);
1645 close(prog_fd);
1648 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
1650 return 0;
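/*
 * Bump RLIMIT_MEMLOCK so map creation is not tripped up by the default
 * locked-memory limit, since BPF maps are charged against it.
 */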
1653 int main(void)
1655 struct rlimit r = {1 << 20, 1 << 20};
1657 setrlimit(RLIMIT_MEMLOCK, &r);
1658 return test();