#include <linux/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

/* Size caps for the per-test instruction and match arrays. */
#define MAX_INSNS	512
#define MAX_MATCHES	16
/* A match pairs a verifier log line number with a substring expected there. */
struct bpf_reg_match {
	unsigned int line;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	enum {
		ACCEPT = 0,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
		},
	},
	{
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
		},
	},
	{
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
		},
	},
	{
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
		},
	},
	/* Tests using unknown values */
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))
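
/* PREP_PKT_POINTERS leaves the packet data pointer in R2 and the data_end
 * pointer in R3; every bounds check below compares a candidate end-of-access
 * pointer against R3 before the packet is dereferenced.
 */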
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)
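
/* LOAD_UNKNOWN(DST) checks that at least 8 bytes of packet data are present
 * (exiting early otherwise) and then loads a single byte into DST.  The
 * verifier therefore sees DST as an unknown value in [0, 255], i.e.
 * var_off=(0x0; 0xff), which is the starting point for the matches below.
 */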
	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
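			/* Reading var_off: the second number is the mask of
			 * unknown bits, the first the known bit values, so
			 * (0x0; 0xff) means "any value 0-255".  Each LSH by 1
			 * doubles umax_value and shifts the mask left by one;
			 * each RSH by 1 halves them again.
			 */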
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
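			/* Multiplying an unknown by a power-of-two constant is
			 * tracked like the equivalent left shift: umax_value
			 * and the var_off mask scale together, e.g. 255 * 8 =
			 * 2040 with mask 0x7f8 at line 15.
			 */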
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header.  */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
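			/* Once the JGE check against R3 succeeds, the verifier
			 * records a known-safe range (r=18) on the packet
			 * registers, so the byte/half/word loads at offsets
			 * 0-3 from R5 (packet offset 14) are all in bounds.
			 * Byte loads leave R4 with umax 255, half-word loads
			 * with umax 65535.
			 */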
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction.  Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
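			/* R4 = R5 + 4 exists only for the bounds check against
			 * the data_end pointer in R3; r=18 on both registers
			 * records that 18 bytes past R2 are known to be safe.
			 */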
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4.
			 */
			{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5_w=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
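			/* The AND with 0xff before the shift is what keeps the
			 * new unknown bounded: (value & 0xff) << 2 is at most
			 * 1020, hence umax_value=1020 and mask 0x3fc here.
			 */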
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2).
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		/* The final load never gets a verified range (see below), so
		 * the verifier is expected to reject this program.
		 */
		.result = REJECT,
		.matches = {
			{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
			/* (4n) + 14 == (4n+2).  We blow our bounds, because
			 * the add could overflow.
			 */
			{7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
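			/* Once the JSGE-against-0 branch is taken, the verifier
			 * knows R5 is non-negative, which restores usable
			 * bounds: umin 2, umax 2^63 - 2, still (4n+2).
			 */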
			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
		},
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one.
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
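			/* The difference can be negative, so only the signed
			 * bounds survive; the JSGE check against 0 in the
			 * program restores the unsigned bounds, as the next
			 * match shows.
			 */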
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14, 74].
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2.
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
			/* New unknown value in R7 is (4n), >= 76 */
			{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
		},
	},
};

static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	/* Find the last instruction that isn't all-zero padding. */
	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}

static char bpf_vlog[32768];
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	const char *line_ptr;
	unsigned int cur_line;
	int prog_len, fd_prog;
	int i;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, 1, "GPL", 0,
				     bpf_vlog, sizeof(bpf_vlog), 2);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		return 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success to load!\n");
		printf("%s", bpf_vlog);
		close(fd_prog);
		return 1;
	}
	if (fd_prog >= 0)
		close(fd_prog);

	/* We make a local copy so that we can strtok() it */
	strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
	line_ptr = strtok(bpf_vlog_copy, "\n");
	for (i = 0; i < MAX_MATCHES; i++) {
		struct bpf_reg_match m = test->matches[i];

		if (!m.match)
			break;
		/* Skip ahead to the verifier log line this match refers to. */
		while (line_ptr) {
			cur_line = -1;
			sscanf(line_ptr, "%u: ", &cur_line);
			if (cur_line == m.line)
				break;
			line_ptr = strtok(NULL, "\n");
		}
		if (!line_ptr) {
			printf("Failed to find line %u for match: %s\n",
			       m.line, m.match);
			printf("%s", bpf_vlog);
			return 1;
		}
		if (!strstr(line_ptr, m.match)) {
			printf("Failed to find match %u: %s\n",
			       m.line, m.match);
			printf("%s", bpf_vlog);
			return 1;
		}
	}
	return 0;
}
static int do_test(unsigned int from, unsigned int to)
{
	int all_pass = 0, all_fail = 0;
	unsigned int i;

	for (i = from; i < to; i++) {
		struct bpf_align_test *test = &tests[i];
		int fail;

		printf("Test %3d: %s ... ", i, test->descr);
		fail = do_test_single(test);
		if (fail) {
			all_fail++;
			printf("FAIL\n");
		} else {
			all_pass++;
			printf("PASS\n");
		}
	}
	printf("Results: %d pass %d fail\n", all_pass, all_fail);
	return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
}
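
/* With no arguments every test runs.  A single numeric argument selects one
 * test; two arguments are read as a lower/upper index pair.  The exact range
 * handling in main() below is reconstructed from the surviving bounds checks
 * and assumes the usual inclusive upper-index convention.
 */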
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	/* Loading BPF programs needs locked memory; lift the limit. */
	setrlimit(RLIMIT_MEMLOCK, &rinf);

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}
	return do_test(from, to);
}