samples/bpf/test_verifier.c (Linux 4.3-rc3)
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <errno.h>
#include <linux/unistd.h>
#include <string.h>
#include <linux/filter.h>
#include <stddef.h>
#include "libbpf.h"

#define MAX_INSNS 512
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
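
/*
 * One verifier test case: a raw eBPF instruction sequence, the indices of
 * instructions whose immediate must be patched with a real map fd (.fixup,
 * terminated by a zero entry), the expected verdict, and, for REJECT cases,
 * a substring expected in the verifier log.
 */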
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup[32];
	const char *errstr;
	enum {
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
};
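
/* The test programs below are run in order by test(). */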
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func 1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup = {2},
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),

			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),

			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),

			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),

			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),

			BPF_EXIT_INSN(),
		},
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access",
		.result = REJECT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup = {24},
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup = {4},
		.errstr = "different pointers",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup = {6},
		.errstr = "different pointers",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup = {7},
		.errstr = "different pointers",
		.result = REJECT,
	},
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check non-u32 access to cb",
		.insns = {
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[60])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	{
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"write skb fields from tc_cls_act prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"PTR_TO_STACK store/load",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"PTR_TO_STACK store/load - bad alignment on off",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -6 size 8",
	},
	{
		"PTR_TO_STACK store/load - bad alignment on reg",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -2 size 8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds low",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds high",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=0 size=8",
	},
};
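
/*
 * Return the number of instructions in a test program by scanning backwards
 * from the end of the fixed-size insns[] array for the last non-zero insn.
 */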
static int probe_filter_length(struct bpf_insn *fp)
{
	int len = 0;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}
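
/*
 * Create a small hash map so that tests with a .fixup list have a valid
 * map fd to patch into their BPF_LD_MAP_FD instructions.
 */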
static int create_map(void)
{
	long long key, value = 0;
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 1024);
	if (map_fd < 0) {
		printf("failed to create map '%s'\n", strerror(errno));
	}

	return map_fd;
}
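
/*
 * Load every test program through bpf_prog_load() and compare the verifier's
 * verdict (and, for rejections, its log message) with the expected result.
 */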
static int test(void)
{
	int prog_fd, i, pass_cnt = 0, err_cnt = 0;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_type = tests[i].prog_type;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int map_fd = -1;

		if (*fixup) {
			/* patch the real map fd into every insn listed in .fixup */
			map_fd = create_map();

			do {
				prog[*fixup].imm = map_fd;
				fixup++;
			} while (*fixup);
		}
		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
					prog, prog_len * sizeof(struct bpf_insn),
					"GPL", 0);

		if (tests[i].result == ACCEPT) {
			if (prog_fd < 0) {
				printf("FAIL\nfailed to load prog '%s'\n",
				       strerror(errno));
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		} else {
			if (prog_fd >= 0) {
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
			if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",
				       bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		}

		pass_cnt++;
		printf("OK\n");
fail:
		if (map_fd >= 0)
			close(map_fd);
		close(prog_fd);
	}

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);

	return 0;
}
int main(void)
{
	return test();
}