// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <limits.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	18
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;

struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	struct bpf_insn	*fill_insns;
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t insn_processed;
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	void (*fill_helper)(struct bpf_test *self);
	int runs;
#define bpf_testdata_struct_t					\
	struct {						\
		uint32_t retval, retval_unpriv;			\
		union {						\
			__u8 data[TEST_DATA_LEN];		\
			__u64 data64[TEST_DATA_LEN / 8];	\
		};						\
	}
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
};
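/*
 * Illustrative sketch only (not one of the bundled tests): a minimal entry
 * in struct bpf_test, following the pattern the snippets under verifier/
 * use, would look roughly like this:
 *
 *	{
 *		"example: return 1",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 1),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 1,
 *	},
 */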
/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to error label */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);
	/* error label */
	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
	 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
	 * to extend the error value of the inlined ld_abs sequence which then
	 * contains 7 insns. so, set the dividend to 7 so the testcase could
	 * work on all arches.
	 */
	unsigned int len = (1 << 15) / 7;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}
#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}
	/* is_state_visited() doesn't allocate state for pruning for every jump.
	 * Hence multiply jmps by 4 to accommodate that heuristic
	 */
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}
/* test the sequence of 8k jumps in inner most function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}
static void bpf_fill_scale(struct bpf_test *self)
{
	switch (self->retval) {
	case 1:
		return bpf_fill_scale1(self);
	case 2:
		return bpf_fill_scale2(self);
	default:
		self->prog_len = 0;
		break;
	}
}
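/*
 * A fill helper is wired up from a test entry rather than called directly.
 * Hypothetical sketch of such an entry (prog_type chosen for illustration
 * only): do_test_fixup() allocates MAX_TEST_INSNS instructions for
 * ->fill_insns and then invokes ->fill_helper(), which sets ->prog_len.
 *
 *	{
 *		"example: 8k jump scale",
 *		.fill_helper = bpf_fill_scale,
 *		.retval = 1,  (selects bpf_fill_scale1() above)
 *		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 *		.result = ACCEPT,
 *	},
 */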
/* BPF_SK_LOOKUP contains 13 instructions; account for them if you need to
 * fix up maps.
 */
#define BPF_SK_LOOKUP(func)						\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */		\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_ ## func)
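/*
 * Usage sketch (illustrative, not a bundled test): the macro expands in place
 * inside a .insns array, so everything that follows it is shifted by those
 * 13 instructions, e.g.
 *
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 *
 * which is why map fixup offsets in such tests need the +13 adjustment the
 * comment above mentions.
 */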
/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()
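/*
 * Usage sketch (illustrative): after BPF_DIRECT_PKT_R2 the program has either
 * exited (packet shorter than 8 bytes) or holds a verified packet pointer in
 * r2, so a load within the first 8 bytes passes the verifier:
 *
 *	BPF_DIRECT_PKT_R2,
 *	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
 *	BPF_EXIT_INSN(),
 */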
/* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
/* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
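/*
 * Both helpers above leave their result in R7; an illustrative sketch of the
 * kind of pattern such tests exercise is mixing R7 into a packet pointer and
 * checking how the verifier bounds-tracks the access:
 *
 *	BPF_DIRECT_PKT_R2,
 *	BPF_RAND_UEXT_R7,
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_7),
 *
 * with the access expected to be rejected unless R7 is bounded further.
 */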
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}
static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!bpf_probe_map_type(map_type, 0)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}
static int __create_map(uint32_t type, uint32_t size_key,
			uint32_t size_value, uint32_t max_elem,
			uint32_t extra_flags)
{
	int fd;

	fd = bpf_create_map(type, size_key, size_value, max_elem,
			    (type == BPF_MAP_TYPE_HASH ?
			     BPF_F_NO_PREALLOC : 0) | extra_flags);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create hash map '%s'!\n", strerror(errno));
	}

	return fd;
}
static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}
static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}
static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 41),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key)
{
	int p1fd, p2fd, mfd;
	int p2key = 1;

	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			     sizeof(int), max_elem, 0);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy1(prog_type);
	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
	if (p1fd < 0 || p2fd < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto err;
	close(p2fd);
	close(p1fd);

	return mfd;
err:
	close(p2fd);
	close(p1fd);
	close(mfd);
	return -1;
}
static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}
static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, 0);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}
/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
	/* struct bpf_spin_lock */                      /* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */                                /* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};
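/*
 * The string offsets above index into btf_str_sec: offset 1 is
 * "bpf_spin_lock", 15 is "val", 19 is "cnt" and 23 is "l" (each name starts
 * right after the preceding NUL). That is how BTF_TYPE_ENC(15, ...) names
 * struct val and BTF_MEMBER_ENC(23, 2, 32) names its member l of type [2]
 * at bit offset 32.
 */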
static int load_btf(void)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	void *ptr, *raw_btf;
	int btf_fd;

	ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
			       sizeof(btf_str_sec));

	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, btf_raw_types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, btf_str_sec, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
	free(raw_btf);
	if (btf_fd < 0)
		return -1;
	return btf_fd;
}
static int create_map_spin_lock(void)
{
	struct bpf_create_map_attr attr = {
		.name = "test_map",
		.map_type = BPF_MAP_TYPE_ARRAY,
		.key_size = 4,
		.value_size = 8,
		.max_entries = 1,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	};
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	attr.btf_fd = btf_fd;
	fd = bpf_create_map_xattr(&attr);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}
static int create_sk_storage_map(void)
{
	struct bpf_create_map_attr attr = {
		.name = "test_map",
		.map_type = BPF_MAP_TYPE_SK_STORAGE,
		.key_size = 4,
		.value_size = 8,
		.max_entries = 0,
		.map_flags = BPF_F_NO_PREALLOC,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	};
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	attr.btf_fd = btf_fd;
	fd = bpf_create_map_xattr(&attr);
	close(attr.btf_fd);
	if (fd < 0)
		printf("Failed to create sk_storage_map\n");
	return fd;
}
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;
	int *fixup_sk_storage_map = test->fixup_sk_storage_map;

	if (test->fill_helper) {
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * for verifier and not do a runtime lookup, so the only thing
	 * that really matters is value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}

	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}

	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}

	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}

	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}

	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}

	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}

	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}

	if (*fixup_map_array_small) {
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}

	if (*fixup_sk_storage_map) {
		map_fds[17] = create_sk_storage_map();
		do {
			prog[*fixup_sk_storage_map].imm = map_fds[17];
			fixup_sk_storage_map++;
		} while (*fixup_sk_storage_map);
	}
}
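/*
 * Fixup mechanics, illustrated with a hypothetical entry: a test whose
 * instruction 3 is a BPF_LD_MAP_FD(BPF_REG_1, 0) placeholder records that
 * slot as
 *
 *	.fixup_map_hash_8b = { 3 },
 *
 * and do_test_fixup() above patches prog[3].imm with the freshly created map
 * fd before the program is loaded. The fixup arrays are zero-terminated,
 * which is also why instruction 0 can never be a fixup target.
 */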
static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	uint32_t retval;
	int err;

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
				tmp, &size_tmp, &retval, NULL);
	if (unpriv)
		set_admin(false);
	if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
		printf("Unexpected bpf_prog_test_run error ");
		return err;
	}
	if (!err && retval != expected_val &&
	    expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", retval, expected_val);
		return 1;
	}

	return 0;
}
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	struct bpf_load_program_attr attr;
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = BPF_F_TEST_RND_HI32;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = prog_type;
	attr.expected_attach_type = test->expected_attach_type;
	attr.insns = prog;
	attr.insns_cnt = prog_len;
	attr.license = "GPL";
	attr.prog_flags = pflags;

	fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
	if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}
	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!expected_err || !strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}
	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	cap_free(caps);

	return (sysadmin == CAP_SET);
}
static void get_unpriv_disabled(void)
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}
static bool test_as_unpriv(struct bpf_test *test)
{
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root we
		 * silently skip right away.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}
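/*
 * Invocation sketch: with no arguments every test runs; "./test_verifier 5"
 * runs only test #5 and "./test_verifier 5 10" runs tests #5 through #10
 * inclusive, per the argc == 2 / argc == 3 handling above.
 */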