/*
 * Extracted from tools/testing/selftests/bpf/verifier/calls.c
 * (linux/fpc-iii.git, blob 2d752c4f8d9d48373f417c352b4db584d7bd91ec)
 */
2 "calls: basic sanity",
3 .insns = {
4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
5 BPF_MOV64_IMM(BPF_REG_0, 1),
6 BPF_EXIT_INSN(),
7 BPF_MOV64_IMM(BPF_REG_0, 2),
8 BPF_EXIT_INSN(),
9 },
10 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11 .result = ACCEPT,
14 "calls: not on unpriviledged",
15 .insns = {
16 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
17 BPF_MOV64_IMM(BPF_REG_0, 1),
18 BPF_EXIT_INSN(),
19 BPF_MOV64_IMM(BPF_REG_0, 2),
20 BPF_EXIT_INSN(),
22 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
23 .result_unpriv = REJECT,
24 .result = ACCEPT,
25 .retval = 1,
28 "calls: div by 0 in subprog",
29 .insns = {
30 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
31 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
32 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
34 offsetof(struct __sk_buff, data_end)),
35 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
36 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
37 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
38 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
39 BPF_MOV64_IMM(BPF_REG_0, 1),
40 BPF_EXIT_INSN(),
41 BPF_MOV32_IMM(BPF_REG_2, 0),
42 BPF_MOV32_IMM(BPF_REG_3, 1),
43 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
44 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
45 offsetof(struct __sk_buff, data)),
46 BPF_EXIT_INSN(),
48 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
49 .result = ACCEPT,
50 .retval = 1,
53 "calls: multiple ret types in subprog 1",
54 .insns = {
55 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
56 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
58 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
59 offsetof(struct __sk_buff, data_end)),
60 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
61 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
62 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
63 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
64 BPF_MOV64_IMM(BPF_REG_0, 1),
65 BPF_EXIT_INSN(),
66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 offsetof(struct __sk_buff, data)),
68 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
69 BPF_MOV32_IMM(BPF_REG_0, 42),
70 BPF_EXIT_INSN(),
72 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
73 .result = REJECT,
74 .errstr = "R0 invalid mem access 'inv'",
77 "calls: multiple ret types in subprog 2",
78 .insns = {
79 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
80 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
81 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
82 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
83 offsetof(struct __sk_buff, data_end)),
84 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
85 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
86 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
87 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
88 BPF_MOV64_IMM(BPF_REG_0, 1),
89 BPF_EXIT_INSN(),
90 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
91 offsetof(struct __sk_buff, data)),
92 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
93 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
94 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
95 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
96 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
97 BPF_LD_MAP_FD(BPF_REG_1, 0),
98 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
99 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
100 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
101 offsetof(struct __sk_buff, data)),
102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
103 BPF_EXIT_INSN(),
105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
106 .fixup_map_hash_8b = { 16 },
107 .result = REJECT,
108 .errstr = "R0 min value is outside of the array range",
111 "calls: overlapping caller/callee",
112 .insns = {
113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
114 BPF_MOV64_IMM(BPF_REG_0, 1),
115 BPF_EXIT_INSN(),
117 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
118 .errstr = "last insn is not an exit or jmp",
119 .result = REJECT,
122 "calls: wrong recursive calls",
123 .insns = {
124 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
125 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
129 BPF_MOV64_IMM(BPF_REG_0, 1),
130 BPF_EXIT_INSN(),
132 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
133 .errstr = "jump out of range",
134 .result = REJECT,
137 "calls: wrong src reg",
138 .insns = {
139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
140 BPF_MOV64_IMM(BPF_REG_0, 1),
141 BPF_EXIT_INSN(),
143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
144 .errstr = "BPF_CALL uses reserved fields",
145 .result = REJECT,
148 "calls: wrong off value",
149 .insns = {
150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
151 BPF_MOV64_IMM(BPF_REG_0, 1),
152 BPF_EXIT_INSN(),
153 BPF_MOV64_IMM(BPF_REG_0, 2),
154 BPF_EXIT_INSN(),
156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
157 .errstr = "BPF_CALL uses reserved fields",
158 .result = REJECT,
161 "calls: jump back loop",
162 .insns = {
163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
164 BPF_MOV64_IMM(BPF_REG_0, 1),
165 BPF_EXIT_INSN(),
167 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
168 .errstr = "back-edge from insn 0 to 0",
169 .result = REJECT,
172 "calls: conditional call",
173 .insns = {
174 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
175 offsetof(struct __sk_buff, mark)),
176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
178 BPF_MOV64_IMM(BPF_REG_0, 1),
179 BPF_EXIT_INSN(),
180 BPF_MOV64_IMM(BPF_REG_0, 2),
181 BPF_EXIT_INSN(),
183 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
184 .errstr = "jump out of range",
185 .result = REJECT,
188 "calls: conditional call 2",
189 .insns = {
190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 offsetof(struct __sk_buff, mark)),
192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
194 BPF_MOV64_IMM(BPF_REG_0, 1),
195 BPF_EXIT_INSN(),
196 BPF_MOV64_IMM(BPF_REG_0, 2),
197 BPF_EXIT_INSN(),
198 BPF_MOV64_IMM(BPF_REG_0, 3),
199 BPF_EXIT_INSN(),
201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
202 .result = ACCEPT,
205 "calls: conditional call 3",
206 .insns = {
207 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
208 offsetof(struct __sk_buff, mark)),
209 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
210 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
211 BPF_MOV64_IMM(BPF_REG_0, 1),
212 BPF_EXIT_INSN(),
213 BPF_MOV64_IMM(BPF_REG_0, 1),
214 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
215 BPF_MOV64_IMM(BPF_REG_0, 3),
216 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
218 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
219 .errstr_unpriv = "back-edge from insn",
220 .result_unpriv = REJECT,
221 .result = ACCEPT,
222 .retval = 1,
225 "calls: conditional call 4",
226 .insns = {
227 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
228 offsetof(struct __sk_buff, mark)),
229 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
230 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
231 BPF_MOV64_IMM(BPF_REG_0, 1),
232 BPF_EXIT_INSN(),
233 BPF_MOV64_IMM(BPF_REG_0, 1),
234 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
235 BPF_MOV64_IMM(BPF_REG_0, 3),
236 BPF_EXIT_INSN(),
238 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
239 .result = ACCEPT,
242 "calls: conditional call 5",
243 .insns = {
244 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
245 offsetof(struct __sk_buff, mark)),
246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
248 BPF_MOV64_IMM(BPF_REG_0, 1),
249 BPF_EXIT_INSN(),
250 BPF_MOV64_IMM(BPF_REG_0, 1),
251 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
252 BPF_MOV64_IMM(BPF_REG_0, 3),
253 BPF_EXIT_INSN(),
255 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
256 .result = ACCEPT,
257 .retval = 1,
260 "calls: conditional call 6",
261 .insns = {
262 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
263 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
264 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
266 BPF_EXIT_INSN(),
267 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
268 offsetof(struct __sk_buff, mark)),
269 BPF_EXIT_INSN(),
271 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
272 .errstr = "infinite loop detected",
273 .result = REJECT,
276 "calls: using r0 returned by callee",
277 .insns = {
278 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
279 BPF_EXIT_INSN(),
280 BPF_MOV64_IMM(BPF_REG_0, 2),
281 BPF_EXIT_INSN(),
283 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
284 .result = ACCEPT,
287 "calls: using uninit r0 from callee",
288 .insns = {
289 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
290 BPF_EXIT_INSN(),
291 BPF_EXIT_INSN(),
293 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
294 .errstr = "!read_ok",
295 .result = REJECT,
298 "calls: callee is using r1",
299 .insns = {
300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
301 BPF_EXIT_INSN(),
302 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
303 offsetof(struct __sk_buff, len)),
304 BPF_EXIT_INSN(),
306 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
307 .result = ACCEPT,
308 .retval = TEST_DATA_LEN,
311 "calls: callee using args1",
312 .insns = {
313 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
314 BPF_EXIT_INSN(),
315 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
316 BPF_EXIT_INSN(),
318 .errstr_unpriv = "allowed for root only",
319 .result_unpriv = REJECT,
320 .result = ACCEPT,
321 .retval = POINTER_VALUE,
324 "calls: callee using wrong args2",
325 .insns = {
326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
327 BPF_EXIT_INSN(),
328 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
329 BPF_EXIT_INSN(),
331 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
332 .errstr = "R2 !read_ok",
333 .result = REJECT,
336 "calls: callee using two args",
337 .insns = {
338 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
339 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
340 offsetof(struct __sk_buff, len)),
341 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
342 offsetof(struct __sk_buff, len)),
343 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
344 BPF_EXIT_INSN(),
345 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
346 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
347 BPF_EXIT_INSN(),
349 .errstr_unpriv = "allowed for root only",
350 .result_unpriv = REJECT,
351 .result = ACCEPT,
352 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
355 "calls: callee changing pkt pointers",
356 .insns = {
357 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
358 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
359 offsetof(struct xdp_md, data_end)),
360 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
362 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
364 /* clear_all_pkt_pointers() has to walk all frames
365 * to make sure that pkt pointers in the caller
366 * are cleared when callee is calling a helper that
367 * adjusts packet size
369 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
370 BPF_MOV32_IMM(BPF_REG_0, 0),
371 BPF_EXIT_INSN(),
372 BPF_MOV64_IMM(BPF_REG_2, 0),
373 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
374 BPF_EXIT_INSN(),
376 .result = REJECT,
377 .errstr = "R6 invalid mem access 'inv'",
378 .prog_type = BPF_PROG_TYPE_XDP,
379 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
382 "calls: ptr null check in subprog",
383 .insns = {
384 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
385 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
387 BPF_LD_MAP_FD(BPF_REG_1, 0),
388 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
389 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
390 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
391 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
393 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
394 BPF_EXIT_INSN(),
395 BPF_MOV64_IMM(BPF_REG_0, 0),
396 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
397 BPF_MOV64_IMM(BPF_REG_0, 1),
398 BPF_EXIT_INSN(),
400 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
401 .fixup_map_hash_48b = { 3 },
402 .result_unpriv = REJECT,
403 .result = ACCEPT,
404 .retval = 0,
407 "calls: two calls with args",
408 .insns = {
409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
410 BPF_EXIT_INSN(),
411 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
413 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
414 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
415 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
416 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
417 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
418 BPF_EXIT_INSN(),
419 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
420 offsetof(struct __sk_buff, len)),
421 BPF_EXIT_INSN(),
423 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
424 .result = ACCEPT,
425 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
428 "calls: calls with stack arith",
429 .insns = {
430 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
433 BPF_EXIT_INSN(),
434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
435 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
436 BPF_EXIT_INSN(),
437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
438 BPF_MOV64_IMM(BPF_REG_0, 42),
439 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
440 BPF_EXIT_INSN(),
442 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
443 .result = ACCEPT,
444 .retval = 42,
447 "calls: calls with misaligned stack access",
448 .insns = {
449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
451 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
452 BPF_EXIT_INSN(),
453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
455 BPF_EXIT_INSN(),
456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
457 BPF_MOV64_IMM(BPF_REG_0, 42),
458 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
459 BPF_EXIT_INSN(),
461 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
462 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
463 .errstr = "misaligned stack access",
464 .result = REJECT,
467 "calls: calls control flow, jump test",
468 .insns = {
469 BPF_MOV64_IMM(BPF_REG_0, 42),
470 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
471 BPF_MOV64_IMM(BPF_REG_0, 43),
472 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
473 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
474 BPF_EXIT_INSN(),
476 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
477 .result = ACCEPT,
478 .retval = 43,
481 "calls: calls control flow, jump test 2",
482 .insns = {
483 BPF_MOV64_IMM(BPF_REG_0, 42),
484 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
485 BPF_MOV64_IMM(BPF_REG_0, 43),
486 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
488 BPF_EXIT_INSN(),
490 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
491 .errstr = "jump out of range from insn 1 to 4",
492 .result = REJECT,
495 "calls: two calls with bad jump",
496 .insns = {
497 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
498 BPF_EXIT_INSN(),
499 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
500 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
501 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
504 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
505 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
506 BPF_EXIT_INSN(),
507 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
508 offsetof(struct __sk_buff, len)),
509 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
510 BPF_EXIT_INSN(),
512 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
513 .errstr = "jump out of range from insn 11 to 9",
514 .result = REJECT,
517 "calls: recursive call. test1",
518 .insns = {
519 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
520 BPF_EXIT_INSN(),
521 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
522 BPF_EXIT_INSN(),
524 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
525 .errstr = "back-edge",
526 .result = REJECT,
529 "calls: recursive call. test2",
530 .insns = {
531 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
532 BPF_EXIT_INSN(),
533 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
534 BPF_EXIT_INSN(),
536 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
537 .errstr = "back-edge",
538 .result = REJECT,
541 "calls: unreachable code",
542 .insns = {
543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
544 BPF_EXIT_INSN(),
545 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
546 BPF_EXIT_INSN(),
547 BPF_MOV64_IMM(BPF_REG_0, 0),
548 BPF_EXIT_INSN(),
549 BPF_MOV64_IMM(BPF_REG_0, 0),
550 BPF_EXIT_INSN(),
552 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
553 .errstr = "unreachable insn 6",
554 .result = REJECT,
557 "calls: invalid call",
558 .insns = {
559 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
560 BPF_EXIT_INSN(),
561 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
562 BPF_EXIT_INSN(),
564 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
565 .errstr = "invalid destination",
566 .result = REJECT,
569 "calls: invalid call 2",
570 .insns = {
571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
572 BPF_EXIT_INSN(),
573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
574 BPF_EXIT_INSN(),
576 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
577 .errstr = "invalid destination",
578 .result = REJECT,
581 "calls: jumping across function bodies. test1",
582 .insns = {
583 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
584 BPF_MOV64_IMM(BPF_REG_0, 0),
585 BPF_EXIT_INSN(),
586 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
587 BPF_EXIT_INSN(),
589 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
590 .errstr = "jump out of range",
591 .result = REJECT,
594 "calls: jumping across function bodies. test2",
595 .insns = {
596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
597 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
598 BPF_MOV64_IMM(BPF_REG_0, 0),
599 BPF_EXIT_INSN(),
600 BPF_EXIT_INSN(),
602 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
603 .errstr = "jump out of range",
604 .result = REJECT,
607 "calls: call without exit",
608 .insns = {
609 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
610 BPF_EXIT_INSN(),
611 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
612 BPF_EXIT_INSN(),
613 BPF_MOV64_IMM(BPF_REG_0, 0),
614 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
616 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
617 .errstr = "not an exit",
618 .result = REJECT,
621 "calls: call into middle of ld_imm64",
622 .insns = {
623 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
624 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
625 BPF_MOV64_IMM(BPF_REG_0, 0),
626 BPF_EXIT_INSN(),
627 BPF_LD_IMM64(BPF_REG_0, 0),
628 BPF_EXIT_INSN(),
630 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
631 .errstr = "last insn",
632 .result = REJECT,
635 "calls: call into middle of other call",
636 .insns = {
637 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
638 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
639 BPF_MOV64_IMM(BPF_REG_0, 0),
640 BPF_EXIT_INSN(),
641 BPF_MOV64_IMM(BPF_REG_0, 0),
642 BPF_MOV64_IMM(BPF_REG_0, 0),
643 BPF_EXIT_INSN(),
645 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
646 .errstr = "last insn",
647 .result = REJECT,
650 "calls: ld_abs with changing ctx data in callee",
651 .insns = {
652 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
653 BPF_LD_ABS(BPF_B, 0),
654 BPF_LD_ABS(BPF_H, 0),
655 BPF_LD_ABS(BPF_W, 0),
656 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
657 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
658 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
659 BPF_LD_ABS(BPF_B, 0),
660 BPF_LD_ABS(BPF_H, 0),
661 BPF_LD_ABS(BPF_W, 0),
662 BPF_EXIT_INSN(),
663 BPF_MOV64_IMM(BPF_REG_2, 1),
664 BPF_MOV64_IMM(BPF_REG_3, 2),
665 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
666 BPF_EXIT_INSN(),
668 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
669 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
670 .result = REJECT,
673 "calls: two calls with bad fallthrough",
674 .insns = {
675 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
676 BPF_EXIT_INSN(),
677 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
679 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
680 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
682 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
683 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
684 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
685 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
686 offsetof(struct __sk_buff, len)),
687 BPF_EXIT_INSN(),
689 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
690 .errstr = "not an exit",
691 .result = REJECT,
694 "calls: two calls with stack read",
695 .insns = {
696 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
697 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
698 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
700 BPF_EXIT_INSN(),
701 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
703 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
705 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
706 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
708 BPF_EXIT_INSN(),
709 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
710 BPF_EXIT_INSN(),
712 .prog_type = BPF_PROG_TYPE_XDP,
713 .result = ACCEPT,
716 "calls: two calls with stack write",
717 .insns = {
718 /* main prog */
719 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
720 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
722 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
724 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
725 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
726 BPF_EXIT_INSN(),
728 /* subprog 1 */
729 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
730 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
732 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
733 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
734 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
735 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
736 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
737 /* write into stack frame of main prog */
738 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
739 BPF_EXIT_INSN(),
741 /* subprog 2 */
742 /* read from stack frame of main prog */
743 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
744 BPF_EXIT_INSN(),
746 .prog_type = BPF_PROG_TYPE_XDP,
747 .result = ACCEPT,
750 "calls: stack overflow using two frames (pre-call access)",
751 .insns = {
752 /* prog 1 */
753 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
754 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
755 BPF_EXIT_INSN(),
757 /* prog 2 */
758 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
759 BPF_MOV64_IMM(BPF_REG_0, 0),
760 BPF_EXIT_INSN(),
762 .prog_type = BPF_PROG_TYPE_XDP,
763 .errstr = "combined stack size",
764 .result = REJECT,
767 "calls: stack overflow using two frames (post-call access)",
768 .insns = {
769 /* prog 1 */
770 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
771 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
772 BPF_EXIT_INSN(),
774 /* prog 2 */
775 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
776 BPF_MOV64_IMM(BPF_REG_0, 0),
777 BPF_EXIT_INSN(),
779 .prog_type = BPF_PROG_TYPE_XDP,
780 .errstr = "combined stack size",
781 .result = REJECT,
784 "calls: stack depth check using three frames. test1",
785 .insns = {
786 /* main */
787 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
788 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
789 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
790 BPF_MOV64_IMM(BPF_REG_0, 0),
791 BPF_EXIT_INSN(),
792 /* A */
793 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
794 BPF_EXIT_INSN(),
795 /* B */
796 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
797 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
798 BPF_EXIT_INSN(),
800 .prog_type = BPF_PROG_TYPE_XDP,
801 /* stack_main=32, stack_A=256, stack_B=64
802 * and max(main+A, main+A+B) < 512
804 .result = ACCEPT,
807 "calls: stack depth check using three frames. test2",
808 .insns = {
809 /* main */
810 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
811 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
812 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
813 BPF_MOV64_IMM(BPF_REG_0, 0),
814 BPF_EXIT_INSN(),
815 /* A */
816 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
817 BPF_EXIT_INSN(),
818 /* B */
819 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
820 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
821 BPF_EXIT_INSN(),
823 .prog_type = BPF_PROG_TYPE_XDP,
824 /* stack_main=32, stack_A=64, stack_B=256
825 * and max(main+A, main+A+B) < 512
827 .result = ACCEPT,
830 "calls: stack depth check using three frames. test3",
831 .insns = {
832 /* main */
833 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
834 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
835 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
836 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
837 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
838 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
839 BPF_MOV64_IMM(BPF_REG_0, 0),
840 BPF_EXIT_INSN(),
841 /* A */
842 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
843 BPF_EXIT_INSN(),
844 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
845 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
846 /* B */
847 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
848 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
849 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
850 BPF_EXIT_INSN(),
852 .prog_type = BPF_PROG_TYPE_XDP,
853 /* stack_main=64, stack_A=224, stack_B=256
854 * and max(main+A, main+A+B) > 512
856 .errstr = "combined stack",
857 .result = REJECT,
860 "calls: stack depth check using three frames. test4",
861 /* void main(void) {
862 * func1(0);
863 * func1(1);
864 * func2(1);
866 * void func1(int alloc_or_recurse) {
867 * if (alloc_or_recurse) {
868 * frame_pointer[-300] = 1;
869 * } else {
870 * func2(alloc_or_recurse);
873 * void func2(int alloc_or_recurse) {
874 * if (alloc_or_recurse) {
875 * frame_pointer[-300] = 1;
879 .insns = {
880 /* main */
881 BPF_MOV64_IMM(BPF_REG_1, 0),
882 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
883 BPF_MOV64_IMM(BPF_REG_1, 1),
884 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
885 BPF_MOV64_IMM(BPF_REG_1, 1),
886 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
887 BPF_MOV64_IMM(BPF_REG_0, 0),
888 BPF_EXIT_INSN(),
889 /* A */
890 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
891 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
892 BPF_EXIT_INSN(),
893 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
894 BPF_EXIT_INSN(),
895 /* B */
896 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
897 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
898 BPF_EXIT_INSN(),
900 .prog_type = BPF_PROG_TYPE_XDP,
901 .result = REJECT,
902 .errstr = "combined stack",
905 "calls: stack depth check using three frames. test5",
906 .insns = {
907 /* main */
908 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
909 BPF_EXIT_INSN(),
910 /* A */
911 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
912 BPF_EXIT_INSN(),
913 /* B */
914 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
915 BPF_EXIT_INSN(),
916 /* C */
917 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
918 BPF_EXIT_INSN(),
919 /* D */
920 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
921 BPF_EXIT_INSN(),
922 /* E */
923 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
924 BPF_EXIT_INSN(),
925 /* F */
926 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
927 BPF_EXIT_INSN(),
928 /* G */
929 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
930 BPF_EXIT_INSN(),
931 /* H */
932 BPF_MOV64_IMM(BPF_REG_0, 0),
933 BPF_EXIT_INSN(),
935 .prog_type = BPF_PROG_TYPE_XDP,
936 .errstr = "call stack",
937 .result = REJECT,
940 "calls: stack depth check in dead code",
941 .insns = {
942 /* main */
943 BPF_MOV64_IMM(BPF_REG_1, 0),
944 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
945 BPF_EXIT_INSN(),
946 /* A */
947 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
948 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
949 BPF_MOV64_IMM(BPF_REG_0, 0),
950 BPF_EXIT_INSN(),
951 /* B */
952 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
953 BPF_EXIT_INSN(),
954 /* C */
955 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
956 BPF_EXIT_INSN(),
957 /* D */
958 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
959 BPF_EXIT_INSN(),
960 /* E */
961 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
962 BPF_EXIT_INSN(),
963 /* F */
964 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
965 BPF_EXIT_INSN(),
966 /* G */
967 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
968 BPF_EXIT_INSN(),
969 /* H */
970 BPF_MOV64_IMM(BPF_REG_0, 0),
971 BPF_EXIT_INSN(),
973 .prog_type = BPF_PROG_TYPE_XDP,
974 .errstr = "call stack",
975 .result = REJECT,
978 "calls: spill into caller stack frame",
979 .insns = {
980 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
984 BPF_EXIT_INSN(),
985 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
986 BPF_MOV64_IMM(BPF_REG_0, 0),
987 BPF_EXIT_INSN(),
989 .prog_type = BPF_PROG_TYPE_XDP,
990 .errstr = "cannot spill",
991 .result = REJECT,
994 "calls: write into caller stack frame",
995 .insns = {
996 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
998 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
999 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1000 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1001 BPF_EXIT_INSN(),
1002 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
1003 BPF_MOV64_IMM(BPF_REG_0, 0),
1004 BPF_EXIT_INSN(),
1006 .prog_type = BPF_PROG_TYPE_XDP,
1007 .result = ACCEPT,
1008 .retval = 42,
1011 "calls: write into callee stack frame",
1012 .insns = {
1013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1014 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
1015 BPF_EXIT_INSN(),
1016 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
1018 BPF_EXIT_INSN(),
1020 .prog_type = BPF_PROG_TYPE_XDP,
1021 .errstr = "cannot return stack pointer",
1022 .result = REJECT,
1025 "calls: two calls with stack write and void return",
1026 .insns = {
1027 /* main prog */
1028 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1029 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1031 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1033 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1034 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1035 BPF_EXIT_INSN(),
1037 /* subprog 1 */
1038 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1039 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1041 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1043 BPF_EXIT_INSN(),
1045 /* subprog 2 */
1046 /* write into stack frame of main prog */
1047 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
1048 BPF_EXIT_INSN(), /* void return */
1050 .prog_type = BPF_PROG_TYPE_XDP,
1051 .result = ACCEPT,
1054 "calls: ambiguous return value",
1055 .insns = {
1056 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1058 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1059 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1061 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1062 BPF_EXIT_INSN(),
1063 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1064 BPF_MOV64_IMM(BPF_REG_0, 0),
1065 BPF_EXIT_INSN(),
1067 .errstr_unpriv = "allowed for root only",
1068 .result_unpriv = REJECT,
1069 .errstr = "R0 !read_ok",
1070 .result = REJECT,
1073 "calls: two calls that return map_value", /* subprog 2 stores map_value_ptr_or_null into the caller's stack; main NULL-checks each slot before writing -> ACCEPT */
1074 .insns = {
1075 /* main prog */
1076 /* pass fp-16, fp-8 into a function */
1077 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1079 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1083 /* fetch map_value_ptr from the stack of this function */
1084 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1086 /* write into map value */
1087 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1088 /* fetch second map_value_ptr from the stack */
1089 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1090 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1091 /* write into map value */
1092 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1093 BPF_MOV64_IMM(BPF_REG_0, 0),
1094 BPF_EXIT_INSN(),
1096 /* subprog 1 */
1097 /* call 3rd function twice */
1098 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1099 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1100 /* first time with fp-8 */
1101 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1103 /* second time with fp-16 */
1104 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1105 BPF_EXIT_INSN(),
1107 /* subprog 2 */
1108 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1109 /* lookup from map */
1110 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1111 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1113 BPF_LD_MAP_FD(BPF_REG_1, 0),
1114 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1115 /* write map_value_ptr into stack frame of main prog */
1116 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1117 BPF_MOV64_IMM(BPF_REG_0, 0),
1118 BPF_EXIT_INSN(), /* return 0 */
1120 .prog_type = BPF_PROG_TYPE_XDP,
1121 .fixup_map_hash_8b = { 23 },
1122 .result = ACCEPT,
1125 "calls: two calls that return map_value with bool condition", /* subprog 2 returns 1 only when the lookup succeeded; subprog 1 gates each write on that flag -> ACCEPT */
1126 .insns = {
1127 /* main prog */
1128 /* pass fp-16, fp-8 into a function */
1129 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1133 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1134 BPF_MOV64_IMM(BPF_REG_0, 0),
1135 BPF_EXIT_INSN(),
1137 /* subprog 1 */
1138 /* call 3rd function twice */
1139 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1140 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1141 /* first time with fp-8 */
1142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1143 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1144 /* fetch map_value_ptr from the stack of this function */
1145 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1146 /* write into map value */
1147 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1149 /* second time with fp-16 */
1150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1151 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1152 /* fetch second map_value_ptr from the stack */
1153 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1154 /* write into map value */
1155 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1156 BPF_EXIT_INSN(),
1158 /* subprog 2 */
1159 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1160 /* lookup from map */
1161 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1162 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1164 BPF_LD_MAP_FD(BPF_REG_1, 0),
1165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1166 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1167 BPF_MOV64_IMM(BPF_REG_0, 0),
1168 BPF_EXIT_INSN(), /* return 0 */
1169 /* write map_value_ptr into stack frame of main prog */
1170 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1171 BPF_MOV64_IMM(BPF_REG_0, 1),
1172 BPF_EXIT_INSN(), /* return 1 */
1174 .prog_type = BPF_PROG_TYPE_XDP,
1175 .fixup_map_hash_8b = { 23 },
1176 .result = ACCEPT,
1179 "calls: two calls that return map_value with incorrect bool check", /* same shape as the previous test, but the second flag check is wrong -> REJECT */
1180 .insns = {
1181 /* main prog */
1182 /* pass fp-16, fp-8 into a function */
1183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1185 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1187 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1188 BPF_MOV64_IMM(BPF_REG_0, 0),
1189 BPF_EXIT_INSN(),
1191 /* subprog 1 */
1192 /* call 3rd function twice */
1193 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1194 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1195 /* first time with fp-8 */
1196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1197 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1198 /* fetch map_value_ptr from the stack of this function */
1199 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1200 /* write into map value */
1201 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1202 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1203 /* second time with fp-16 */
1204 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1205 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), /* bug under test: compares against 0 instead of 1, so fp-16 may never have been written */
1206 /* fetch second map_value_ptr from the stack */
1207 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1208 /* write into map value */
1209 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1210 BPF_EXIT_INSN(),
1212 /* subprog 2 */
1213 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1214 /* lookup from map */
1215 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1216 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1218 BPF_LD_MAP_FD(BPF_REG_1, 0),
1219 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1220 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1221 BPF_MOV64_IMM(BPF_REG_0, 0),
1222 BPF_EXIT_INSN(), /* return 0 */
1223 /* write map_value_ptr into stack frame of main prog */
1224 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1225 BPF_MOV64_IMM(BPF_REG_0, 1),
1226 BPF_EXIT_INSN(), /* return 1 */
1228 .prog_type = BPF_PROG_TYPE_XDP,
1229 .fixup_map_hash_8b = { 23 },
1230 .result = REJECT,
1231 .errstr = "invalid read from stack off -16+0 size 8",
1234 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1", /* pointer+flag pairs flow through two frames; final write uses off=2 into an 8-byte value -> REJECT */
1235 .insns = {
1236 /* main prog */
1237 /* pass fp-16, fp-8 into a function */
1238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1243 BPF_MOV64_IMM(BPF_REG_0, 0),
1244 BPF_EXIT_INSN(),
1246 /* subprog 1 */
1247 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1248 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1249 /* 1st lookup from map */
1250 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1253 BPF_LD_MAP_FD(BPF_REG_1, 0),
1254 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1255 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1256 BPF_MOV64_IMM(BPF_REG_8, 0),
1257 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1258 /* write map_value_ptr into stack frame of main prog at fp-8 */
1259 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1260 BPF_MOV64_IMM(BPF_REG_8, 1),
1262 /* 2nd lookup from map */
1263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1265 BPF_LD_MAP_FD(BPF_REG_1, 0),
1266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1267 BPF_FUNC_map_lookup_elem),
1268 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1269 BPF_MOV64_IMM(BPF_REG_9, 0),
1270 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1271 /* write map_value_ptr into stack frame of main prog at fp-16 */
1272 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1273 BPF_MOV64_IMM(BPF_REG_9, 1),
1275 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1276 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1277 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1278 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1279 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1280 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1281 BPF_EXIT_INSN(),
1283 /* subprog 2 */
1284 /* if arg2 == 1 do *arg1 = 0 */
1285 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1286 /* fetch map_value_ptr from the stack of this function */
1287 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1288 /* write into map value */
1289 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1291 /* if arg4 == 1 do *arg3 = 0 */
1292 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1293 /* fetch map_value_ptr from the stack of this function */
1294 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1295 /* write into map value */
1296 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), /* off=2 with an 8-byte store overruns value_size=8; this is the access the verifier must catch */
1297 BPF_EXIT_INSN(),
1299 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1300 .fixup_map_hash_8b = { 12, 22 },
1301 .result = REJECT,
1302 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1303 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1306 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2", /* same as test1 but the final write is at off=0, which fits value_size=8 -> ACCEPT */
1307 .insns = {
1308 /* main prog */
1309 /* pass fp-16, fp-8 into a function */
1310 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1312 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1315 BPF_MOV64_IMM(BPF_REG_0, 0),
1316 BPF_EXIT_INSN(),
1318 /* subprog 1 */
1319 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1320 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1321 /* 1st lookup from map */
1322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1323 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1325 BPF_LD_MAP_FD(BPF_REG_1, 0),
1326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1327 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1328 BPF_MOV64_IMM(BPF_REG_8, 0),
1329 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1330 /* write map_value_ptr into stack frame of main prog at fp-8 */
1331 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1332 BPF_MOV64_IMM(BPF_REG_8, 1),
1334 /* 2nd lookup from map */
1335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1337 BPF_LD_MAP_FD(BPF_REG_1, 0),
1338 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1339 BPF_FUNC_map_lookup_elem),
1340 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1341 BPF_MOV64_IMM(BPF_REG_9, 0),
1342 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1343 /* write map_value_ptr into stack frame of main prog at fp-16 */
1344 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1345 BPF_MOV64_IMM(BPF_REG_9, 1),
1347 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1348 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1350 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1351 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
1353 BPF_EXIT_INSN(),
1355 /* subprog 2 */
1356 /* if arg2 == 1 do *arg1 = 0 */
1357 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1358 /* fetch map_value_ptr from the stack of this function */
1359 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1360 /* write into map value */
1361 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1363 /* if arg4 == 1 do *arg3 = 0 */
1364 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1365 /* fetch map_value_ptr from the stack of this function */
1366 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1367 /* write into map value */
1368 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1369 BPF_EXIT_INSN(),
1371 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1372 .fixup_map_hash_8b = { 12, 22 },
1373 .result = ACCEPT,
1376 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3", /* same pattern built with jumps instead of calls; off=2 store must still be caught -> REJECT */
1377 .insns = {
1378 /* main prog */
1379 /* pass fp-16, fp-8 into a function */
1380 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1384 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
1385 BPF_MOV64_IMM(BPF_REG_0, 0),
1386 BPF_EXIT_INSN(),
1388 /* subprog 1 */
1389 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1390 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1391 /* 1st lookup from map */
1392 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
1393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1395 BPF_LD_MAP_FD(BPF_REG_1, 0),
1396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1397 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1398 BPF_MOV64_IMM(BPF_REG_8, 0),
1399 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1400 /* write map_value_ptr into stack frame of main prog at fp-8 */
1401 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1402 BPF_MOV64_IMM(BPF_REG_8, 1),
1404 /* 2nd lookup from map */
1405 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1407 BPF_LD_MAP_FD(BPF_REG_1, 0),
1408 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1409 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1410 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
1411 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1412 /* write map_value_ptr into stack frame of main prog at fp-16 */
1413 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1414 BPF_MOV64_IMM(BPF_REG_9, 1),
1416 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1417 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
1418 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1419 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1420 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1421 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
1422 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
1424 /* subprog 2 */
1425 /* if arg2 == 1 do *arg1 = 0 */
1426 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1427 /* fetch map_value_ptr from the stack of this function */
1428 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1429 /* write into map value */
1430 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1432 /* if arg4 == 1 do *arg3 = 0 */
1433 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1434 /* fetch map_value_ptr from the stack of this function */
1435 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1436 /* write into map value */
1437 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), /* off=2 8-byte store into value_size=8 is the deliberate violation */
1438 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
1440 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1441 .fixup_map_hash_8b = { 12, 22 },
1442 .result = REJECT,
1443 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1444 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1447 "calls: two calls that receive map_value_ptr_or_null via arg. test1", /* raw ptr_or_null spilled to caller stack; flags track the NULL check, writes gated on flag == 1 -> ACCEPT */
1448 .insns = {
1449 /* main prog */
1450 /* pass fp-16, fp-8 into a function */
1451 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1453 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1456 BPF_MOV64_IMM(BPF_REG_0, 0),
1457 BPF_EXIT_INSN(),
1459 /* subprog 1 */
1460 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1461 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1462 /* 1st lookup from map */
1463 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1464 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1466 BPF_LD_MAP_FD(BPF_REG_1, 0),
1467 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1468 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1469 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1470 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1471 BPF_MOV64_IMM(BPF_REG_8, 0),
1472 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1473 BPF_MOV64_IMM(BPF_REG_8, 1),
1475 /* 2nd lookup from map */
1476 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1478 BPF_LD_MAP_FD(BPF_REG_1, 0),
1479 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1480 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1481 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1482 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1483 BPF_MOV64_IMM(BPF_REG_9, 0),
1484 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1485 BPF_MOV64_IMM(BPF_REG_9, 1),
1487 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1488 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1490 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1491 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1493 BPF_EXIT_INSN(),
1495 /* subprog 2 */
1496 /* if arg2 == 1 do *arg1 = 0 */
1497 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1498 /* fetch map_value_ptr from the stack of this function */
1499 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1500 /* write into map value */
1501 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1503 /* if arg4 == 1 do *arg3 = 0 */
1504 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1505 /* fetch map_value_ptr from the stack of this function */
1506 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1507 /* write into map value */
1508 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1509 BPF_EXIT_INSN(),
1511 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1512 .fixup_map_hash_8b = { 12, 22 },
1513 .result = ACCEPT,
1516 "calls: two calls that receive map_value_ptr_or_null via arg. test2", /* second write is gated on arg4 == 0 (the NULL path), so the loaded slot is not a valid pointer -> REJECT */
1517 .insns = {
1518 /* main prog */
1519 /* pass fp-16, fp-8 into a function */
1520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1522 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1525 BPF_MOV64_IMM(BPF_REG_0, 0),
1526 BPF_EXIT_INSN(),
1528 /* subprog 1 */
1529 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1530 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1531 /* 1st lookup from map */
1532 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1533 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1535 BPF_LD_MAP_FD(BPF_REG_1, 0),
1536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1537 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1538 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1539 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1540 BPF_MOV64_IMM(BPF_REG_8, 0),
1541 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1542 BPF_MOV64_IMM(BPF_REG_8, 1),
1544 /* 2nd lookup from map */
1545 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1547 BPF_LD_MAP_FD(BPF_REG_1, 0),
1548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1549 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1550 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1551 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1552 BPF_MOV64_IMM(BPF_REG_9, 0),
1553 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1554 BPF_MOV64_IMM(BPF_REG_9, 1),
1556 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1557 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1558 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1559 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1560 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1561 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1562 BPF_EXIT_INSN(),
1564 /* subprog 2 */
1565 /* if arg2 == 1 do *arg1 = 0 */
1566 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1567 /* fetch map_value_ptr from the stack of this function */
1568 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1569 /* write into map value */
1570 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1572 /* if arg4 == 0 do *arg3 = 0 */
1573 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2), /* bug under test: flag == 0 means the lookup failed, yet the pointer is still dereferenced below */
1574 /* fetch map_value_ptr from the stack of this function */
1575 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1576 /* write into map value */
1577 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1578 BPF_EXIT_INSN(),
1580 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1581 .fixup_map_hash_8b = { 12, 22 },
1582 .result = REJECT,
1583 .errstr = "R0 invalid mem access 'inv'",
1586 "calls: pkt_ptr spill into caller stack", /* unchecked pkt_ptr spilled to caller stack, then read back only after the range check -> ACCEPT */
1587 .insns = {
1588 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1591 BPF_EXIT_INSN(),
1593 /* subprog 1 */
1594 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1595 offsetof(struct __sk_buff, data)),
1596 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1597 offsetof(struct __sk_buff, data_end)),
1598 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1600 /* spill unchecked pkt_ptr into stack of caller */
1601 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1602 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1603 /* now the pkt range is verified, read pkt_ptr from stack */
1604 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1605 /* write 4 bytes into packet */
1606 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1607 BPF_EXIT_INSN(),
1609 .result = ACCEPT,
1610 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1611 .retval = POINTER_VALUE,
1612 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1615 "calls: pkt_ptr spill into caller stack 2", /* caller dereferences the spilled pkt_ptr unconditionally after return, where the range check may not hold -> REJECT */
1616 .insns = {
1617 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1620 /* Marking is still kept, but not in all cases safe. */
1621 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1622 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1623 BPF_EXIT_INSN(),
1625 /* subprog 1 */
1626 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1627 offsetof(struct __sk_buff, data)),
1628 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1629 offsetof(struct __sk_buff, data_end)),
1630 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1632 /* spill unchecked pkt_ptr into stack of caller */
1633 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1634 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1635 /* now the pkt range is verified, read pkt_ptr from stack */
1636 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1637 /* write 4 bytes into packet */
1638 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1639 BPF_EXIT_INSN(),
1641 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1642 .errstr = "invalid access to packet",
1643 .result = REJECT,
1644 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1647 "calls: pkt_ptr spill into caller stack 3", /* subprog returns 1 only on the range-checked path; caller dereferences only when R0 != 0 -> ACCEPT */
1648 .insns = {
1649 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1651 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1652 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1653 /* Marking is still kept and safe here. */
1654 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1655 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1656 BPF_EXIT_INSN(),
1658 /* subprog 1 */
1659 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1660 offsetof(struct __sk_buff, data)),
1661 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1662 offsetof(struct __sk_buff, data_end)),
1663 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1665 /* spill unchecked pkt_ptr into stack of caller */
1666 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1667 BPF_MOV64_IMM(BPF_REG_5, 0),
1668 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1669 BPF_MOV64_IMM(BPF_REG_5, 1),
1670 /* now the pkt range is verified, read pkt_ptr from stack */
1671 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1672 /* write 4 bytes into packet */
1673 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1674 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1675 BPF_EXIT_INSN(),
1677 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1678 .result = ACCEPT,
1679 .retval = 1,
1680 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1683 "calls: pkt_ptr spill into caller stack 4", /* like test 3, but the subprog never reads the spill back; caller's guarded read still sees the checked marking -> ACCEPT */
1684 .insns = {
1685 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1689 /* Check marking propagated. */
1690 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1691 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1692 BPF_EXIT_INSN(),
1694 /* subprog 1 */
1695 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1696 offsetof(struct __sk_buff, data)),
1697 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1698 offsetof(struct __sk_buff, data_end)),
1699 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1701 /* spill unchecked pkt_ptr into stack of caller */
1702 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1703 BPF_MOV64_IMM(BPF_REG_5, 0),
1704 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1705 BPF_MOV64_IMM(BPF_REG_5, 1),
1706 /* don't read back pkt_ptr from stack here */
1707 /* write 4 bytes into packet */
1708 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1709 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1710 BPF_EXIT_INSN(),
1712 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1713 .result = ACCEPT,
1714 .retval = 1,
1715 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1718 "calls: pkt_ptr spill into caller stack 5", /* the same store insn spills ctx on one path and pkt_ptr on another -> REJECT */
1719 .insns = {
1720 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1722 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
1723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1724 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1725 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1726 BPF_EXIT_INSN(),
1728 /* subprog 1 */
1729 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1730 offsetof(struct __sk_buff, data)),
1731 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1732 offsetof(struct __sk_buff, data_end)),
1733 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1735 BPF_MOV64_IMM(BPF_REG_5, 0),
1736 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1737 /* spill checked pkt_ptr into stack of caller */
1738 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1739 BPF_MOV64_IMM(BPF_REG_5, 1),
1740 /* don't read back pkt_ptr from stack here */
1741 /* write 4 bytes into packet */
1742 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1743 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1744 BPF_EXIT_INSN(),
1746 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1747 .errstr = "same insn cannot be used with different",
1748 .result = REJECT,
1749 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1752 "calls: pkt_ptr spill into caller stack 6", /* slot may hold data_end (not overwritten on the early-exit path); caller's deref must be rejected -> REJECT */
1753 .insns = {
1754 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1755 offsetof(struct __sk_buff, data_end)),
1756 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1758 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1760 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1761 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1762 BPF_EXIT_INSN(),
1764 /* subprog 1 */
1765 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1766 offsetof(struct __sk_buff, data)),
1767 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1768 offsetof(struct __sk_buff, data_end)),
1769 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1771 BPF_MOV64_IMM(BPF_REG_5, 0),
1772 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1773 /* spill checked pkt_ptr into stack of caller */
1774 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1775 BPF_MOV64_IMM(BPF_REG_5, 1),
1776 /* don't read back pkt_ptr from stack here */
1777 /* write 4 bytes into packet */
1778 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1779 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1780 BPF_EXIT_INSN(),
1782 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1783 .errstr = "R4 invalid mem access",
1784 .result = REJECT,
1785 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1788 "calls: pkt_ptr spill into caller stack 7", /* slot may still hold the scalar 0 on the unchecked path; caller's deref must be rejected -> REJECT */
1789 .insns = {
1790 BPF_MOV64_IMM(BPF_REG_2, 0),
1791 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1793 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1794 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1795 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1796 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1797 BPF_EXIT_INSN(),
1799 /* subprog 1 */
1800 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1801 offsetof(struct __sk_buff, data)),
1802 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1803 offsetof(struct __sk_buff, data_end)),
1804 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1806 BPF_MOV64_IMM(BPF_REG_5, 0),
1807 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1808 /* spill checked pkt_ptr into stack of caller */
1809 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1810 BPF_MOV64_IMM(BPF_REG_5, 1),
1811 /* don't read back pkt_ptr from stack here */
1812 /* write 4 bytes into packet */
1813 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1814 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1815 BPF_EXIT_INSN(),
1817 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1818 .errstr = "R4 invalid mem access",
1819 .result = REJECT,
1820 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1823 "calls: pkt_ptr spill into caller stack 8", /* caller spills an already range-checked pkt_ptr; subprog only overwrites it with another checked one -> ACCEPT */
1824 .insns = {
1825 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1826 offsetof(struct __sk_buff, data)),
1827 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1828 offsetof(struct __sk_buff, data_end)),
1829 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1831 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1832 BPF_EXIT_INSN(),
1833 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1835 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1836 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1837 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1838 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1839 BPF_EXIT_INSN(),
1841 /* subprog 1 */
1842 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1843 offsetof(struct __sk_buff, data)),
1844 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1845 offsetof(struct __sk_buff, data_end)),
1846 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1848 BPF_MOV64_IMM(BPF_REG_5, 0),
1849 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1850 /* spill checked pkt_ptr into stack of caller */
1851 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1852 BPF_MOV64_IMM(BPF_REG_5, 1),
1853 /* don't read back pkt_ptr from stack here */
1854 /* write 4 bytes into packet */
1855 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1856 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1857 BPF_EXIT_INSN(),
1859 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1860 .result = ACCEPT,
1861 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1864 "calls: pkt_ptr spill into caller stack 9", /* like test 8 but the subprog spills BEFORE the range check, poisoning the caller's slot -> REJECT */
1865 .insns = {
1866 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1867 offsetof(struct __sk_buff, data)),
1868 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1869 offsetof(struct __sk_buff, data_end)),
1870 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1872 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1873 BPF_EXIT_INSN(),
1874 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1876 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1877 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1878 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1879 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1880 BPF_EXIT_INSN(),
1882 /* subprog 1 */
1883 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1884 offsetof(struct __sk_buff, data)),
1885 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1886 offsetof(struct __sk_buff, data_end)),
1887 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1889 BPF_MOV64_IMM(BPF_REG_5, 0),
1890 /* spill unchecked pkt_ptr into stack of caller */
1891 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1892 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1893 BPF_MOV64_IMM(BPF_REG_5, 1),
1894 /* don't read back pkt_ptr from stack here */
1895 /* write 4 bytes into packet */
1896 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1897 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1898 BPF_EXIT_INSN(),
1900 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1901 .errstr = "invalid access to packet",
1902 .result = REJECT,
1903 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1906 "calls: caller stack init to zero or map_value_or_null", /* fp-8 is either the pre-initialized zero or a ptr_or_null written by the subprog; NULL check covers both -> ACCEPT */
1907 .insns = {
1908 BPF_MOV64_IMM(BPF_REG_0, 0),
1909 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
1910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1912 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1913 /* fetch map_value_or_null or const_zero from stack */
1914 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1915 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1916 /* store into map_value */
1917 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
1918 BPF_EXIT_INSN(),
1920 /* subprog 1 */
1921 /* if (ctx == 0) return; */
1922 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
1923 /* else bpf_map_lookup() and *(fp - 8) = r0 */
1924 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
1925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1927 BPF_LD_MAP_FD(BPF_REG_1, 0),
1928 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1930 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1931 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1932 BPF_EXIT_INSN(),
1934 .fixup_map_hash_8b = { 13 },
1935 .result = ACCEPT,
1936 .prog_type = BPF_PROG_TYPE_XDP,
1939 "calls: stack init to zero and pruning", /* pruning must not skip the path where fp-8 was never initialized -> REJECT */
1940 .insns = {
1941 /* first make allocated_stack 16 byte */
1942 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
1943 /* now fork the execution such that the false branch
1944 * of JGT insn will be verified second and it skips zero
1945 * init of fp-8 stack slot. If stack liveness marking
1946 * is missing live_read marks from call map_lookup
1947 * processing then pruning will incorrectly assume
1948 * that fp-8 stack slot was unused in the fall-through
1949 * branch and will accept the program incorrectly
 */
1951 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
1952 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1953 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1954 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1956 BPF_LD_MAP_FD(BPF_REG_1, 0),
1957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1958 BPF_EXIT_INSN(),
1960 .fixup_map_hash_48b = { 6 },
1961 .errstr = "invalid indirect read from stack off -8+0 size 8",
1962 .result = REJECT,
1963 .prog_type = BPF_PROG_TYPE_XDP,
1966 "calls: ctx read at start of subprog", /* subprog reads one byte from ctx (R1) as its first access; must be accepted for root -> ACCEPT */
1967 .insns = {
1968 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1969 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1970 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
1971 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1974 BPF_EXIT_INSN(),
1975 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1976 BPF_MOV64_IMM(BPF_REG_0, 0),
1977 BPF_EXIT_INSN(),
1979 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1980 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1981 .result_unpriv = REJECT,
1982 .result = ACCEPT,
/* NOTE(review): struct bpf_test entry.  Expects REJECT with "!read_ok":
 * the byte load through r1 after the bpf-to-bpf call must be flagged
 * (presumably r1 is a clobbered caller-saved reg at that point — TODO
 * confirm), and pruning across the call frame must not skip verifying
 * the r8 != 1 path where that load executes.
 */
1985 "calls: cross frame pruning",
1986 .insns = {
1987 /* r8 = !!random();
1988 * call pruner()
1989 * if (r8)
1990 * do something bad;
1992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1993 BPF_MOV64_IMM(BPF_REG_8, 0),
1994 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), /* r8 = !!random() via the two movs */
1995 BPF_MOV64_IMM(BPF_REG_8, 1),
1996 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
1997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), /* call subprog at insn 10 */
1998 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), /* r8 == 1 path skips the bad load */
1999 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), /* load must fail: !read_ok */
2000 BPF_MOV64_IMM(BPF_REG_0, 0),
2001 BPF_EXIT_INSN(),
2002 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), /* subprog: no-op branch, then exit */
2003 BPF_EXIT_INSN(),
2005 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2006 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
2007 .errstr = "!read_ok",
2008 .result = REJECT,
/* NOTE(review): struct bpf_test entry.  Cross-frame pruning with two
 * random-derived flags (r8, r9): liveness marks must propagate across
 * the call frame so the verifier still reaches and rejects the byte
 * load through r2 with "!read_ok" (r2 is never set up as a readable
 * pointer on that path — TODO confirm exact clobber semantics).
 */
2011 "calls: cross frame pruning - liveness propagation",
2012 .insns = {
2013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2014 BPF_MOV64_IMM(BPF_REG_8, 0),
2015 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), /* r8 = !!random() */
2016 BPF_MOV64_IMM(BPF_REG_8, 1),
2017 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2018 BPF_MOV64_IMM(BPF_REG_9, 0),
2019 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), /* r9 = !!random() */
2020 BPF_MOV64_IMM(BPF_REG_9, 1),
2021 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2022 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), /* call subprog at insn 14 */
2023 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), /* r8 == 1 path skips the bad load */
2024 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), /* load must fail: !read_ok */
2025 BPF_MOV64_IMM(BPF_REG_0, 0),
2026 BPF_EXIT_INSN(),
2027 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), /* subprog: no-op branch, then exit */
2028 BPF_EXIT_INSN(),
2030 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2031 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
2032 .errstr = "!read_ok",
2033 .result = REJECT,