/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <errno.h>
#include <linux/unistd.h>
#include <string.h>
#include <linux/filter.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/resource.h>
#include "libbpf.h"
#define MAX_INSNS	512
#define ARRAY_SIZE(x)	(sizeof(x) / sizeof(*(x)))

#define MAX_FIXUPS	8
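
/*
 * Each test case is a small eBPF program plus the verdict the verifier is
 * expected to return.  .fixup/.prog_array_fixup list instruction indices
 * whose imm field is patched at runtime with a freshly created map fd
 * (hash map or prog array).  .errstr/.errstr_unpriv are substrings that
 * must appear in the verifier log; .result/.result_unpriv give the expected
 * outcome for root and non-root runs respectively.
 */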
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup[MAX_FIXUPS];
	int prog_array_fixup[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
};
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
163 "loop (back-edge)",
164 .insns = {
165 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
166 BPF_EXIT_INSN(),
168 .errstr = "back-edge",
169 .result = REJECT,
172 "loop2 (back-edge)",
173 .insns = {
174 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
175 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
176 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
177 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
178 BPF_EXIT_INSN(),
180 .errstr = "back-edge",
181 .result = REJECT,
184 "conditional loop",
185 .insns = {
186 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
188 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
189 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
190 BPF_EXIT_INSN(),
192 .errstr = "back-edge",
193 .result = REJECT,
196 "read uninitialized register",
197 .insns = {
198 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
199 BPF_EXIT_INSN(),
201 .errstr = "R2 !read_ok",
202 .result = REJECT,
205 "read invalid register",
206 .insns = {
207 BPF_MOV64_REG(BPF_REG_0, -1),
208 BPF_EXIT_INSN(),
210 .errstr = "R15 is invalid",
211 .result = REJECT,
214 "program doesn't init R0 before exit",
215 .insns = {
216 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
217 BPF_EXIT_INSN(),
219 .errstr = "R0 !read_ok",
220 .result = REJECT,
223 "program doesn't init R0 before exit in all branches",
224 .insns = {
225 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
226 BPF_MOV64_IMM(BPF_REG_0, 1),
227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
228 BPF_EXIT_INSN(),
230 .errstr = "R0 !read_ok",
231 .errstr_unpriv = "R1 pointer comparison",
232 .result = REJECT,
235 "stack out of bounds",
236 .insns = {
237 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
238 BPF_EXIT_INSN(),
240 .errstr = "invalid stack",
241 .result = REJECT,
244 "invalid call insn1",
245 .insns = {
246 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
247 BPF_EXIT_INSN(),
249 .errstr = "BPF_CALL uses reserved",
250 .result = REJECT,
253 "invalid call insn2",
254 .insns = {
255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
256 BPF_EXIT_INSN(),
258 .errstr = "BPF_CALL uses reserved",
259 .result = REJECT,
262 "invalid function call",
263 .insns = {
264 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
265 BPF_EXIT_INSN(),
267 .errstr = "invalid func 1234567",
268 .result = REJECT,
271 "uninitialized stack1",
272 .insns = {
273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
275 BPF_LD_MAP_FD(BPF_REG_1, 0),
276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
277 BPF_EXIT_INSN(),
279 .fixup = {2},
280 .errstr = "invalid indirect read from stack",
281 .result = REJECT,
284 "uninitialized stack2",
285 .insns = {
286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
287 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
288 BPF_EXIT_INSN(),
290 .errstr = "invalid read from stack",
291 .result = REJECT,
294 "check valid spill/fill",
295 .insns = {
296 /* spill R1(ctx) into stack */
297 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
299 /* fill it back into R2 */
300 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
302 /* should be able to access R0 = *(R2 + 8) */
303 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
304 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
305 BPF_EXIT_INSN(),
307 .errstr_unpriv = "R0 leaks addr",
308 .result = ACCEPT,
309 .result_unpriv = REJECT,
312 "check corrupted spill/fill",
313 .insns = {
314 /* spill R1(ctx) into stack */
315 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
317 /* mess up with R1 pointer on stack */
318 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
320 /* fill back into R0 should fail */
321 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
323 BPF_EXIT_INSN(),
325 .errstr_unpriv = "attempt to corrupt spilled",
326 .errstr = "corrupted spill",
327 .result = REJECT,
330 "invalid src register in STX",
331 .insns = {
332 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
333 BPF_EXIT_INSN(),
335 .errstr = "R15 is invalid",
336 .result = REJECT,
339 "invalid dst register in STX",
340 .insns = {
341 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
342 BPF_EXIT_INSN(),
344 .errstr = "R14 is invalid",
345 .result = REJECT,
348 "invalid dst register in ST",
349 .insns = {
350 BPF_ST_MEM(BPF_B, 14, -1, -1),
351 BPF_EXIT_INSN(),
353 .errstr = "R14 is invalid",
354 .result = REJECT,
357 "invalid src register in LDX",
358 .insns = {
359 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
360 BPF_EXIT_INSN(),
362 .errstr = "R12 is invalid",
363 .result = REJECT,
366 "invalid dst register in LDX",
367 .insns = {
368 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
369 BPF_EXIT_INSN(),
371 .errstr = "R11 is invalid",
372 .result = REJECT,
375 "junk insn",
376 .insns = {
377 BPF_RAW_INSN(0, 0, 0, 0, 0),
378 BPF_EXIT_INSN(),
380 .errstr = "invalid BPF_LD_IMM",
381 .result = REJECT,
384 "junk insn2",
385 .insns = {
386 BPF_RAW_INSN(1, 0, 0, 0, 0),
387 BPF_EXIT_INSN(),
389 .errstr = "BPF_LDX uses reserved fields",
390 .result = REJECT,
393 "junk insn3",
394 .insns = {
395 BPF_RAW_INSN(-1, 0, 0, 0, 0),
396 BPF_EXIT_INSN(),
398 .errstr = "invalid BPF_ALU opcode f0",
399 .result = REJECT,
402 "junk insn4",
403 .insns = {
404 BPF_RAW_INSN(-1, -1, -1, -1, -1),
405 BPF_EXIT_INSN(),
407 .errstr = "invalid BPF_ALU opcode f0",
408 .result = REJECT,
411 "junk insn5",
412 .insns = {
413 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
414 BPF_EXIT_INSN(),
416 .errstr = "BPF_ALU uses reserved fields",
417 .result = REJECT,
420 "misaligned read from stack",
421 .insns = {
422 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
423 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
424 BPF_EXIT_INSN(),
426 .errstr = "misaligned access",
427 .result = REJECT,
430 "invalid map_fd for function call",
431 .insns = {
432 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
433 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
435 BPF_LD_MAP_FD(BPF_REG_1, 0),
436 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
437 BPF_EXIT_INSN(),
439 .errstr = "fd 0 is not pointing to valid bpf_map",
440 .result = REJECT,
443 "don't check return value before access",
444 .insns = {
445 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
446 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
448 BPF_LD_MAP_FD(BPF_REG_1, 0),
449 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
450 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
451 BPF_EXIT_INSN(),
453 .fixup = {3},
454 .errstr = "R0 invalid mem access 'map_value_or_null'",
455 .result = REJECT,
458 "access memory with incorrect alignment",
459 .insns = {
460 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
461 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
463 BPF_LD_MAP_FD(BPF_REG_1, 0),
464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
466 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
467 BPF_EXIT_INSN(),
469 .fixup = {3},
470 .errstr = "misaligned access",
471 .result = REJECT,
474 "sometimes access memory with incorrect alignment",
475 .insns = {
476 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
479 BPF_LD_MAP_FD(BPF_REG_1, 0),
480 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
481 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
482 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
483 BPF_EXIT_INSN(),
484 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
485 BPF_EXIT_INSN(),
487 .fixup = {3},
488 .errstr = "R0 invalid mem access",
489 .errstr_unpriv = "R0 leaks addr",
490 .result = REJECT,
493 "jump test 1",
494 .insns = {
495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
496 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
497 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
498 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
500 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
501 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
502 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
503 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
504 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
505 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
506 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
508 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
509 BPF_MOV64_IMM(BPF_REG_0, 0),
510 BPF_EXIT_INSN(),
512 .errstr_unpriv = "R1 pointer comparison",
513 .result_unpriv = REJECT,
514 .result = ACCEPT,
517 "jump test 2",
518 .insns = {
519 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
520 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
521 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
522 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
523 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
524 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
525 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
526 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
527 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
528 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
529 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
530 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
531 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
533 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
534 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
535 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
536 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
537 BPF_MOV64_IMM(BPF_REG_0, 0),
538 BPF_EXIT_INSN(),
540 .errstr_unpriv = "R1 pointer comparison",
541 .result_unpriv = REJECT,
542 .result = ACCEPT,
545 "jump test 3",
546 .insns = {
547 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
548 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
549 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
551 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
552 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
553 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
555 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
556 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
557 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
559 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
561 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
563 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
564 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
565 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
567 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
568 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
569 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
571 BPF_LD_MAP_FD(BPF_REG_1, 0),
572 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
573 BPF_EXIT_INSN(),
575 .fixup = {24},
576 .errstr_unpriv = "R1 pointer comparison",
577 .result_unpriv = REJECT,
578 .result = ACCEPT,
581 "jump test 4",
582 .insns = {
583 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
586 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
588 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
589 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
590 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
591 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
592 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
593 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
594 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
595 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
597 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
598 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
599 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
600 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
601 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
602 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
603 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
605 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
606 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
607 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
608 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
611 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
612 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
613 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
614 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
615 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
616 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
623 BPF_MOV64_IMM(BPF_REG_0, 0),
624 BPF_EXIT_INSN(),
626 .errstr_unpriv = "R1 pointer comparison",
627 .result_unpriv = REJECT,
628 .result = ACCEPT,
631 "jump test 5",
632 .insns = {
633 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
634 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
635 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
636 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
637 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
638 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
639 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
640 BPF_MOV64_IMM(BPF_REG_0, 0),
641 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
642 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
643 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
644 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
645 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
646 BPF_MOV64_IMM(BPF_REG_0, 0),
647 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
648 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
649 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
650 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
651 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
652 BPF_MOV64_IMM(BPF_REG_0, 0),
653 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
654 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
655 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
656 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
657 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
658 BPF_MOV64_IMM(BPF_REG_0, 0),
659 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
660 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
661 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
662 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
663 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
664 BPF_MOV64_IMM(BPF_REG_0, 0),
665 BPF_EXIT_INSN(),
667 .errstr_unpriv = "R1 pointer comparison",
668 .result_unpriv = REJECT,
669 .result = ACCEPT,
672 "access skb fields ok",
673 .insns = {
674 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
675 offsetof(struct __sk_buff, len)),
676 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
677 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
678 offsetof(struct __sk_buff, mark)),
679 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
680 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
681 offsetof(struct __sk_buff, pkt_type)),
682 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
683 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
684 offsetof(struct __sk_buff, queue_mapping)),
685 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
686 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
687 offsetof(struct __sk_buff, protocol)),
688 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
689 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
690 offsetof(struct __sk_buff, vlan_present)),
691 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
692 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
693 offsetof(struct __sk_buff, vlan_tci)),
694 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
695 BPF_EXIT_INSN(),
697 .result = ACCEPT,
700 "access skb fields bad1",
701 .insns = {
702 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
703 BPF_EXIT_INSN(),
705 .errstr = "invalid bpf_context access",
706 .result = REJECT,
709 "access skb fields bad2",
710 .insns = {
711 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
712 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
713 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
715 BPF_LD_MAP_FD(BPF_REG_1, 0),
716 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
717 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
718 BPF_EXIT_INSN(),
719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
720 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
721 offsetof(struct __sk_buff, pkt_type)),
722 BPF_EXIT_INSN(),
724 .fixup = {4},
725 .errstr = "different pointers",
726 .errstr_unpriv = "R1 pointer comparison",
727 .result = REJECT,
730 "access skb fields bad3",
731 .insns = {
732 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
733 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
734 offsetof(struct __sk_buff, pkt_type)),
735 BPF_EXIT_INSN(),
736 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
737 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
739 BPF_LD_MAP_FD(BPF_REG_1, 0),
740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
741 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
742 BPF_EXIT_INSN(),
743 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
744 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
746 .fixup = {6},
747 .errstr = "different pointers",
748 .errstr_unpriv = "R1 pointer comparison",
749 .result = REJECT,
752 "access skb fields bad4",
753 .insns = {
754 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
755 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
756 offsetof(struct __sk_buff, len)),
757 BPF_MOV64_IMM(BPF_REG_0, 0),
758 BPF_EXIT_INSN(),
759 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
760 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
762 BPF_LD_MAP_FD(BPF_REG_1, 0),
763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
764 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
765 BPF_EXIT_INSN(),
766 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
767 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
769 .fixup = {7},
770 .errstr = "different pointers",
771 .errstr_unpriv = "R1 pointer comparison",
772 .result = REJECT,
775 "check skb->mark is not writeable by sockets",
776 .insns = {
777 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
778 offsetof(struct __sk_buff, mark)),
779 BPF_EXIT_INSN(),
781 .errstr = "invalid bpf_context access",
782 .errstr_unpriv = "R1 leaks addr",
783 .result = REJECT,
786 "check skb->tc_index is not writeable by sockets",
787 .insns = {
788 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
789 offsetof(struct __sk_buff, tc_index)),
790 BPF_EXIT_INSN(),
792 .errstr = "invalid bpf_context access",
793 .errstr_unpriv = "R1 leaks addr",
794 .result = REJECT,
797 "check non-u32 access to cb",
798 .insns = {
799 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
800 offsetof(struct __sk_buff, cb[0])),
801 BPF_EXIT_INSN(),
803 .errstr = "invalid bpf_context access",
804 .errstr_unpriv = "R1 leaks addr",
805 .result = REJECT,
808 "check out of range skb->cb access",
809 .insns = {
810 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
811 offsetof(struct __sk_buff, cb[0]) + 256),
812 BPF_EXIT_INSN(),
814 .errstr = "invalid bpf_context access",
815 .errstr_unpriv = "",
816 .result = REJECT,
817 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
820 "write skb fields from socket prog",
821 .insns = {
822 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
823 offsetof(struct __sk_buff, cb[4])),
824 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
825 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
826 offsetof(struct __sk_buff, mark)),
827 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
828 offsetof(struct __sk_buff, tc_index)),
829 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
830 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
831 offsetof(struct __sk_buff, cb[0])),
832 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
833 offsetof(struct __sk_buff, cb[2])),
834 BPF_EXIT_INSN(),
836 .result = ACCEPT,
837 .errstr_unpriv = "R1 leaks addr",
838 .result_unpriv = REJECT,
841 "write skb fields from tc_cls_act prog",
842 .insns = {
843 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
844 offsetof(struct __sk_buff, cb[0])),
845 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
846 offsetof(struct __sk_buff, mark)),
847 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
848 offsetof(struct __sk_buff, tc_index)),
849 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
850 offsetof(struct __sk_buff, tc_index)),
851 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
852 offsetof(struct __sk_buff, cb[3])),
853 BPF_EXIT_INSN(),
855 .errstr_unpriv = "",
856 .result_unpriv = REJECT,
857 .result = ACCEPT,
858 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
861 "PTR_TO_STACK store/load",
862 .insns = {
863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
865 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
866 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
867 BPF_EXIT_INSN(),
869 .result = ACCEPT,
872 "PTR_TO_STACK store/load - bad alignment on off",
873 .insns = {
874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
876 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
877 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
878 BPF_EXIT_INSN(),
880 .result = REJECT,
881 .errstr = "misaligned access off -6 size 8",
884 "PTR_TO_STACK store/load - bad alignment on reg",
885 .insns = {
886 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
888 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
889 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
890 BPF_EXIT_INSN(),
892 .result = REJECT,
893 .errstr = "misaligned access off -2 size 8",
896 "PTR_TO_STACK store/load - out of bounds low",
897 .insns = {
898 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
900 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
901 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
902 BPF_EXIT_INSN(),
904 .result = REJECT,
905 .errstr = "invalid stack off=-79992 size=8",
908 "PTR_TO_STACK store/load - out of bounds high",
909 .insns = {
910 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
912 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
913 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
914 BPF_EXIT_INSN(),
916 .result = REJECT,
917 .errstr = "invalid stack off=0 size=8",
920 "unpriv: return pointer",
921 .insns = {
922 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
923 BPF_EXIT_INSN(),
925 .result = ACCEPT,
926 .result_unpriv = REJECT,
927 .errstr_unpriv = "R0 leaks addr",
930 "unpriv: add const to pointer",
931 .insns = {
932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
933 BPF_MOV64_IMM(BPF_REG_0, 0),
934 BPF_EXIT_INSN(),
936 .result = ACCEPT,
937 .result_unpriv = REJECT,
938 .errstr_unpriv = "R1 pointer arithmetic",
941 "unpriv: add pointer to pointer",
942 .insns = {
943 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
944 BPF_MOV64_IMM(BPF_REG_0, 0),
945 BPF_EXIT_INSN(),
947 .result = ACCEPT,
948 .result_unpriv = REJECT,
949 .errstr_unpriv = "R1 pointer arithmetic",
952 "unpriv: neg pointer",
953 .insns = {
954 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
955 BPF_MOV64_IMM(BPF_REG_0, 0),
956 BPF_EXIT_INSN(),
958 .result = ACCEPT,
959 .result_unpriv = REJECT,
960 .errstr_unpriv = "R1 pointer arithmetic",
963 "unpriv: cmp pointer with const",
964 .insns = {
965 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
966 BPF_MOV64_IMM(BPF_REG_0, 0),
967 BPF_EXIT_INSN(),
969 .result = ACCEPT,
970 .result_unpriv = REJECT,
971 .errstr_unpriv = "R1 pointer comparison",
974 "unpriv: cmp pointer with pointer",
975 .insns = {
976 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
977 BPF_MOV64_IMM(BPF_REG_0, 0),
978 BPF_EXIT_INSN(),
980 .result = ACCEPT,
981 .result_unpriv = REJECT,
982 .errstr_unpriv = "R10 pointer comparison",
985 "unpriv: check that printk is disallowed",
986 .insns = {
987 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
988 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
990 BPF_MOV64_IMM(BPF_REG_2, 8),
991 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
993 BPF_MOV64_IMM(BPF_REG_0, 0),
994 BPF_EXIT_INSN(),
996 .errstr_unpriv = "unknown func 6",
997 .result_unpriv = REJECT,
998 .result = ACCEPT,
1001 "unpriv: pass pointer to helper function",
1002 .insns = {
1003 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1004 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1006 BPF_LD_MAP_FD(BPF_REG_1, 0),
1007 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1008 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
1010 BPF_MOV64_IMM(BPF_REG_0, 0),
1011 BPF_EXIT_INSN(),
1013 .fixup = {3},
1014 .errstr_unpriv = "R4 leaks addr",
1015 .result_unpriv = REJECT,
1016 .result = ACCEPT,
1019 "unpriv: indirectly pass pointer on stack to helper function",
1020 .insns = {
1021 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1022 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1024 BPF_LD_MAP_FD(BPF_REG_1, 0),
1025 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1026 BPF_MOV64_IMM(BPF_REG_0, 0),
1027 BPF_EXIT_INSN(),
1029 .fixup = {3},
1030 .errstr = "invalid indirect read from stack off -8+0 size 8",
1031 .result = REJECT,
1034 "unpriv: mangle pointer on stack 1",
1035 .insns = {
1036 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1037 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1038 BPF_MOV64_IMM(BPF_REG_0, 0),
1039 BPF_EXIT_INSN(),
1041 .errstr_unpriv = "attempt to corrupt spilled",
1042 .result_unpriv = REJECT,
1043 .result = ACCEPT,
1046 "unpriv: mangle pointer on stack 2",
1047 .insns = {
1048 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1049 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1050 BPF_MOV64_IMM(BPF_REG_0, 0),
1051 BPF_EXIT_INSN(),
1053 .errstr_unpriv = "attempt to corrupt spilled",
1054 .result_unpriv = REJECT,
1055 .result = ACCEPT,
1058 "unpriv: read pointer from stack in small chunks",
1059 .insns = {
1060 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1061 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1062 BPF_MOV64_IMM(BPF_REG_0, 0),
1063 BPF_EXIT_INSN(),
1065 .errstr = "invalid size",
1066 .result = REJECT,
1069 "unpriv: write pointer into ctx",
1070 .insns = {
1071 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1072 BPF_MOV64_IMM(BPF_REG_0, 0),
1073 BPF_EXIT_INSN(),
1075 .errstr_unpriv = "R1 leaks addr",
1076 .result_unpriv = REJECT,
1077 .errstr = "invalid bpf_context access",
1078 .result = REJECT,
1081 "unpriv: write pointer into map elem value",
1082 .insns = {
1083 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1084 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1086 BPF_LD_MAP_FD(BPF_REG_1, 0),
1087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1089 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1090 BPF_EXIT_INSN(),
1092 .fixup = {3},
1093 .errstr_unpriv = "R0 leaks addr",
1094 .result_unpriv = REJECT,
1095 .result = ACCEPT,
1098 "unpriv: partial copy of pointer",
1099 .insns = {
1100 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1101 BPF_MOV64_IMM(BPF_REG_0, 0),
1102 BPF_EXIT_INSN(),
1104 .errstr_unpriv = "R10 partial copy",
1105 .result_unpriv = REJECT,
1106 .result = ACCEPT,
1109 "unpriv: pass pointer to tail_call",
1110 .insns = {
1111 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1112 BPF_LD_MAP_FD(BPF_REG_2, 0),
1113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
1114 BPF_MOV64_IMM(BPF_REG_0, 0),
1115 BPF_EXIT_INSN(),
1117 .prog_array_fixup = {1},
1118 .errstr_unpriv = "R3 leaks addr into helper",
1119 .result_unpriv = REJECT,
1120 .result = ACCEPT,
1123 "unpriv: cmp map pointer with zero",
1124 .insns = {
1125 BPF_MOV64_IMM(BPF_REG_1, 0),
1126 BPF_LD_MAP_FD(BPF_REG_1, 0),
1127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1128 BPF_MOV64_IMM(BPF_REG_0, 0),
1129 BPF_EXIT_INSN(),
1131 .fixup = {1},
1132 .errstr_unpriv = "R1 pointer comparison",
1133 .result_unpriv = REJECT,
1134 .result = ACCEPT,
1137 "unpriv: write into frame pointer",
1138 .insns = {
1139 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1140 BPF_MOV64_IMM(BPF_REG_0, 0),
1141 BPF_EXIT_INSN(),
1143 .errstr = "frame pointer is read only",
1144 .result = REJECT,
1147 "unpriv: cmp of frame pointer",
1148 .insns = {
1149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1150 BPF_MOV64_IMM(BPF_REG_0, 0),
1151 BPF_EXIT_INSN(),
1153 .errstr_unpriv = "R10 pointer comparison",
1154 .result_unpriv = REJECT,
1155 .result = ACCEPT,
1158 "unpriv: cmp of stack pointer",
1159 .insns = {
1160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1163 BPF_MOV64_IMM(BPF_REG_0, 0),
1164 BPF_EXIT_INSN(),
1166 .errstr_unpriv = "R2 pointer comparison",
1167 .result_unpriv = REJECT,
1168 .result = ACCEPT,
1171 "unpriv: obfuscate stack pointer",
1172 .insns = {
1173 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1176 BPF_MOV64_IMM(BPF_REG_0, 0),
1177 BPF_EXIT_INSN(),
1179 .errstr_unpriv = "R2 pointer arithmetic",
1180 .result_unpriv = REJECT,
1181 .result = ACCEPT,
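
/* Determine the length of a test program by scanning backwards from the end
 * of the fixed-size insns[] array for the last non-zero instruction.
 */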
static int probe_filter_length(struct bpf_insn *fp)
{
	int len = 0;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}
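
/* Create throwaway maps whose fds get patched into the BPF_LD_MAP_FD
 * instructions named by .fixup (hash map) and .prog_array_fixup (prog array).
 */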
static int create_map(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
				sizeof(long long), sizeof(long long), 1024);
	if (map_fd < 0)
		printf("failed to create map '%s'\n", strerror(errno));

	return map_fd;
}
static int create_prog_array(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
				sizeof(int), sizeof(int), 4);
	if (map_fd < 0)
		printf("failed to create prog_array '%s'\n", strerror(errno));

	return map_fd;
}
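
/* Load every test program and compare the verifier's verdict and log against
 * the expectations recorded in the test case.  When run as non-root, the
 * *_unpriv expectations are used where they are set.
 */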
static int test(void)
{
	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
	bool unpriv = geteuid() != 0;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_type = tests[i].prog_type;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int *prog_array_fixup = tests[i].prog_array_fixup;
		int expected_result;
		const char *expected_errstr;
		int map_fd = -1, prog_array_fd = -1;

		if (*fixup) {
			/* patch a real map fd into the listed instructions */
			map_fd = create_map();

			do {
				prog[*fixup].imm = map_fd;
				fixup++;
			} while (*fixup);
		}
		if (*prog_array_fixup) {
			prog_array_fd = create_prog_array();

			do {
				prog[*prog_array_fixup].imm = prog_array_fd;
				prog_array_fixup++;
			} while (*prog_array_fixup);
		}
		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
					prog, prog_len * sizeof(struct bpf_insn),
					"GPL", 0);

		if (unpriv && tests[i].result_unpriv != UNDEF)
			expected_result = tests[i].result_unpriv;
		else
			expected_result = tests[i].result;

		if (unpriv && tests[i].errstr_unpriv)
			expected_errstr = tests[i].errstr_unpriv;
		else
			expected_errstr = tests[i].errstr;

		if (expected_result == ACCEPT) {
			if (prog_fd < 0) {
				printf("FAIL\nfailed to load prog '%s'\n",
				       strerror(errno));
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		} else {
			if (prog_fd >= 0) {
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
			if (strstr(bpf_log_buf, expected_errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",
				       bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		}

		pass_cnt++;
		printf("OK\n");
fail:
		if (map_fd >= 0)
			close(map_fd);
		if (prog_array_fd >= 0)
			close(prog_array_fd);
		close(prog_fd);
	}
	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);

	return 0;
}
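
/* eBPF maps and programs are charged against RLIMIT_MEMLOCK, so bump the
 * limit before running the tests.
 */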
int main(void)
{
	struct rlimit r = {1 << 20, 1 << 20};

	setrlimit(RLIMIT_MEMLOCK, &r);
	return test();
}