// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
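/* These tests exercise the verifier's alignment and variable-offset
 * (var_off) tracking: each entry loads a small program under
 * BPF_F_STRICT_ALIGNMENT and checks that selected lines of the verifier
 * log contain the expected register state.
 */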
struct bpf_align_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};
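/* Each match pairs an instruction index, as printed at the start of a
 * verifier-log line, with a substring that must appear in that line's
 * register state dump.
 */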
static struct bpf_align_test tests[] = {
	/* Four tests of known constants. These aren't staggeringly
	 * interesting since we track exact values now.
	 */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		BPF_MOV64_IMM(BPF_REG_3, 4),
		BPF_MOV64_IMM(BPF_REG_3, 8),
		BPF_MOV64_IMM(BPF_REG_3, 16),
		BPF_MOV64_IMM(BPF_REG_3, 32),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		{1, "R1=ctx(id=0,off=0,imm=0)"},
		BPF_MOV64_IMM(BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
		BPF_MOV64_IMM(BPF_REG_4, 32),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		{1, "R1=ctx(id=0,off=0,imm=0)"},
		BPF_MOV64_IMM(BPF_REG_3, 4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
		BPF_MOV64_IMM(BPF_REG_4, 8),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		{1, "R1=ctx(id=0,off=0,imm=0)"},
		BPF_MOV64_IMM(BPF_REG_3, 7),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		{1, "R1=ctx(id=0,off=0,imm=0)"},
/* Tests using unknown values */
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))
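/* PREP_PKT_POINTERS loads skb->data into R2 and skb->data_end into R3 so
 * that the generated programs can bounds-check their packet accesses.
 */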
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)
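/* LOAD_UNKNOWN checks that at least 8 bytes of packet data are available
 * and then loads the first packet byte into DST_REG.  The verifier only
 * knows that the value lies in [0, 255] (var_off (0x0; 0xff)), which the
 * tests below use as their "unknown" input.
 */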
	.descr = "unknown shift",
		LOAD_UNKNOWN(BPF_REG_3),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
		LOAD_UNKNOWN(BPF_REG_4),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
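		/* Each left shift doubles umax_value and shifts the var_off
		 * unknown-bits mask up by one; each right shift halves them
		 * again, so the known low zero bits (the alignment) are
		 * tracked exactly through the whole sequence.
		 */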
164 {7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
165 {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
166 {8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
167 {9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
168 {10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
169 {11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
170 {18, "R3=pkt_end(id=0,off=0,imm=0)"},
171 {18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
172 {19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
173 {20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
174 {21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
175 {22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
176 {23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
	.descr = "unknown mul",
		LOAD_UNKNOWN(BPF_REG_3),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
197 {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
198 {8, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
199 {9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
200 {10, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
201 {11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
202 {12, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
203 {13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
204 {14, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
205 {15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
206 {16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
	.descr = "packet const offset",
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		/* Skip over ethernet header. */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
		BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
		BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
		BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
		BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
237 {4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
238 {5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
239 {6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
240 {10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
241 {10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
242 {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
243 {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
244 {15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
	.descr = "packet variable offset",
		LOAD_UNKNOWN(BPF_REG_6),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
		/* First, add a constant to the R5 packet pointer,
		 * then a variable with a known alignment.
		 */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
		/* Now, test in the other direction. Adding first
		 * the variable offset to R5, then the constant.
		 */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
		/* Test multiple accumulations of unknown values
		 * into a packet pointer.
		 */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		/* Calculated offset in R6 has unknown value, but known
		 * alignment of 4.
		 */
		{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
		{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Offset is added to packet pointer R5, resulting in
		 * known fixed offset, and variable offset from R6.
		 */
		{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* At the time the word size load is performed from R5,
		 * its total offset is NET_IP_ALIGN + reg->off (0) +
		 * reg->aux_off (14) which is 16. Then the variable
		 * offset is considered using reg->aux_off_align which
		 * is 4 and meets the load's requirements.
		 */
		{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
		{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Variable offset is added to R5 packet pointer,
		 * resulting in auxiliary alignment of 4.
		 */
		{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Constant offset is added to R5, resulting in
		 * reg->off of 14.
		 */
		{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* At the time the word size load is performed from R5,
		 * its total fixed offset is NET_IP_ALIGN + reg->off
		 * (14) which is 16. Then the variable offset is 4-byte
		 * aligned, so the total offset is 4-byte aligned and
		 * meets the load's requirements.
		 */
		{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
		{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Constant offset is added to R5 packet pointer,
		 * resulting in reg->off value of 14.
		 */
		{26, "R5_w=pkt(id=0,off=14,r=8"},
		/* Variable offset is added to R5, resulting in a
		 * variable offset of (4n).
		 */
		{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Constant is added to R5 again, setting reg->off to 18. */
		{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* And once more we add a variable; resulting var_off
		 * is still (4n), fixed offset is not changed.
		 * Also, we create a new reg->id.
		 */
		{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
		/* At the time the word size load is performed from R5,
		 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
		 * which is 20. Then the variable offset is (4n), so
		 * the total offset is 4-byte aligned and meets the
		 * load's requirements.
		 */
		{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
		{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
	.descr = "packet variable offset 2",
		/* Create an unknown offset, (4n+2)-aligned */
		LOAD_UNKNOWN(BPF_REG_6),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
		/* Add it to the packet pointer */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
		/* Check bounds and perform a read */
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
		/* Make a (4n) offset from the value we just read */
		BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
		/* Add it to the packet pointer */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
		/* Check bounds and perform a read */
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		/* Calculated offset in R6 has unknown value, but known
		 * alignment of 4.
		 */
		{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
		{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Adding 14 makes R6 be (4n+2) */
		{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
		/* Packet pointer has (4n+2) offset */
		{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
		{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
		/* At the time the word size load is performed from R5,
		 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
		 * which is 2. Then the variable offset is (4n+2), so
		 * the total offset is 4-byte aligned and meets the
		 * load's requirements.
		 */
		{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
		/* Newly read value in R6 was shifted left by 2, so has
		 * known alignment of 4.
		 */
		{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Added (4n) to packet pointer's (4n+2) var_off, giving
		 * another (4n+2).
		 */
		{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
		{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
		/* At the time the word size load is performed from R5,
		 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
		 * which is 2. Then the variable offset is (4n+2), so
		 * the total offset is 4-byte aligned and meets the
		 * load's requirements.
		 */
		{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
	.descr = "dubious pointer arithmetic",
		BPF_MOV64_IMM(BPF_REG_0, 0),
		/* (ptr - ptr) << 2 */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
		/* We have a (4n) value. Let's make a packet offset
		 * out of it. First add 14, to make it a (4n+2)
		 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
		/* Then make sure it's nonnegative */
		BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
		/* Add it to packet pointer */
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
		/* Check bounds and perform a read */
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
452 /* (ptr - ptr) << 2 == unknown, (4n) */
453 {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
454 /* (4n) + 14 == (4n+2). We blow our bounds, because
455 * the add could overflow.
457 {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
459 {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
460 /* packet pointer + nonnegative (4n+2) */
461 {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
462 {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
463 /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
464 * We checked the bounds, but it might have been able
465 * to overflow if the packet pointer started in the
466 * upper half of the address space.
467 * So we did not get a 'range' on R6, and the access
470 {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
	.descr = "variable subtraction",
		/* Create an unknown offset, (4n+2)-aligned */
		LOAD_UNKNOWN(BPF_REG_6),
		BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
		/* Create another unknown, (4n)-aligned, and subtract
		 * it from the first one
		 */
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
		/* Bounds-check the result */
		BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
		/* Add it to the packet pointer */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
		/* Check bounds and perform a read */
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		/* Calculated offset in R6 has unknown value, but known
		 * alignment of 4.
		 */
		{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
		{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Adding 14 makes R6 be (4n+2) */
		{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
		/* New unknown value in R7 is (4n) */
		{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
		/* Subtracting it from R6 blows our unsigned bounds */
		{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
		{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		/* At the time the word size load is performed from R5,
		 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
		 * which is 2. Then the variable offset is (4n+2), so
		 * the total offset is 4-byte aligned and meets the
		 * load's requirements.
		 */
		{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
	.descr = "pointer variable subtraction",
		/* Create an unknown offset, (4n+2)-aligned and bounded
		 * to [14, 74]
		 */
		LOAD_UNKNOWN(BPF_REG_6),
		BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
		/* Subtract it from the packet pointer */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
		/* Create another unknown, (4n)-aligned and >= 74.
		 * That in fact means >= 76, since 74 % 4 == 2
		 */
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
		/* Add it to the packet pointer */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
		/* Check bounds and perform a read */
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		/* Calculated offset in R6 has unknown value, but known
		 * alignment of 4.
		 */
		{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
		{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
		/* Adding 14 makes R6 be (4n+2) */
		{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
		/* Subtracting from packet pointer overflows ubounds */
		{13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
		/* New unknown value in R7 is (4n), >= 76 */
		{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
		/* Adding it to packet pointer gives nice bounds again */
		{16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
		/* At the time the word size load is performed from R5,
		 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
		 * which is 2. Then the variable offset is (4n+2), so
		 * the total offset is 4-byte aligned and meets the
		 * load's requirements.
		 */
		{20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}
static char bpf_vlog[32768];
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, fd_prog;
	int i;
	prog_len = probe_filter_length(prog);
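	/* Load the program with BPF_F_STRICT_ALIGNMENT so the verifier
	 * enforces alignment checks regardless of the host architecture,
	 * and with log level 2 so it dumps the per-instruction register
	 * state that the matches are checked against.
	 */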
	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, BPF_F_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success to load!\n");
		printf("%s", bpf_vlog);
	/* We make a local copy so that we can strtok() it */
	strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
	line_ptr = strtok(bpf_vlog_copy, "\n");
	for (i = 0; i < MAX_MATCHES; i++) {
		struct bpf_reg_match m = test->matches[i];

		while (line_ptr) {
			sscanf(line_ptr, "%u: ", &cur_line);
			if (cur_line == m.line)
				break;
			line_ptr = strtok(NULL, "\n");
		}
634 printf("Failed to find line %u for match: %s\n",
637 printf("%s", bpf_vlog
);
640 if (!strstr(line_ptr
, m
.match
)) {
641 printf("Failed to find match %u: %s\n",
644 printf("%s", bpf_vlog
);
void test_align(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_align_test *test = &tests[i];

		if (!test__start_subtest(test->descr))
			continue;

		CHECK_FAIL(do_test_single(test));
	}
}