tools/testing/selftests/bpf/test_verifier.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"
#define MAX_INSNS	BPF_MAXINSNS
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	18
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	struct bpf_insn	*fill_insns;
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t insn_processed;
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	void (*fill_helper)(struct bpf_test *self);
	uint8_t runs;
#define bpf_testdata_struct_t					\
	struct {						\
		uint32_t retval, retval_unpriv;			\
		union {						\
			__u8 data[TEST_DATA_LEN];		\
			__u64 data64[TEST_DATA_LEN / 8];	\
		};						\
	}
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
};
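
/* A tests[] entry is written with designated initializers against the
 * struct above. A minimal sketch (illustrative only; the real entries
 * live under verifier/ and are pulled in via FILL_ARRAY below):
 *
 *	{
 *		"sketch: mov immediate and exit",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 1),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 1,
 *	},
 */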
/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to error label */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);
	/* error label */
	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}
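
/* Why the failure jumps above use "len - i - 3": BPF jump offsets are
 * relative to the instruction following the jump, and the error label is
 * the BPF_MOV32_IMM at index len - 2, so from index i the offset is
 * (len - 2) - (i + 1) = len - i - 3.
 */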
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* jump range is limited to 16 bit. every ld_abs is replaced by 6
	 * insns, but on arches like arm, ppc etc, there will be one BPF_ZEXT
	 * inserted to extend the error value of the inlined ld_abs sequence
	 * which then contains 7 insns. so, set the dividend to 7 so the
	 * testcase could work on all arches.
	 */
	unsigned int len = (1 << 15) / 7;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}
#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}
	/* is_state_visited() doesn't allocate state for pruning for every jump.
	 * Hence multiply jmps by 4 to accommodate that heuristic
	 */
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}
/* test the sequence of 8k jumps in inner most function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}
static void bpf_fill_scale(struct bpf_test *self)
{
	switch (self->retval) {
	case 1:
		return bpf_fill_scale1(self);
	case 2:
		return bpf_fill_scale2(self);
	default:
		self->prog_len = 0;
		break;
	}
}
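
/* The dispatch above keys off self->retval before the fill helper
 * overwrites it with the expected return value. A hypothetical entry
 * selecting a variant would look like this (sketch only):
 *
 *	{
 *		"scale: 8k jumps",
 *		.fill_helper = bpf_fill_scale,
 *		.retval = 1,	// routed to bpf_fill_scale1()
 *		.result = ACCEPT,
 *	},
 */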
/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
#define BPF_SK_LOOKUP(func)						\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */		\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_ ## func)
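
/* Typical use inside a test's .insns (sketch only, assuming a program
 * type for which sk_lookup_tcp is allowed):
 *
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),	// sk == NULL? skip release
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),	// must release the acquired ref
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 */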
/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
 * value into 0 and does necessary preparation for direct packet access
 * through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()
/* BPF_RAND_UEXT_R7 contains 4 instructions; it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions; it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
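
/* Sketch of how a bounds-tracking test might consume these (hypothetical
 * insns, not taken from any real entry): after the shifts above the
 * verifier should see R7 in [0, 2^31), so a comparison can tighten it:
 *
 *	BPF_RAND_UEXT_R7,				// r7 = random in [0, 2^31)
 *	BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 16, 2),	// if r7 < 16, keep going
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 *	// ...here the verifier must know 0 <= r7 < 16...
 */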
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}
static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!bpf_probe_map_type(map_type, 0)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}
static int __create_map(uint32_t type, uint32_t size_key,
			uint32_t size_value, uint32_t max_elem,
			uint32_t extra_flags)
{
	int fd;

	fd = bpf_create_map(type, size_key, size_value, max_elem,
			    (type == BPF_MAP_TYPE_HASH ?
			     BPF_F_NO_PREALLOC : 0) | extra_flags);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create map '%s'!\n", strerror(errno));
	}

	return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}
static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}
static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 41),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key)
{
	int p2key = 1;
	int mfd, p1fd, p2fd;

	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			     sizeof(int), max_elem, 0);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy1(prog_type);
	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
	if (p1fd < 0 || p2fd < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto out;
	close(p2fd);
	close(p1fd);

	return mfd;
out:
	close(p2fd);
	close(p1fd);
	close(mfd);
	return -1;
}
static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}
static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, 0);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}
/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
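/* Name offsets into btf_str_sec above: "bpf_spin_lock" starts at 1,
 * "val" at 15, "cnt" at 19 and "l" at 23; these are the first arguments
 * to the BTF_TYPE_ENC()/BTF_MEMBER_ENC() entries below.
 */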
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
	/* struct bpf_spin_lock */			/* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */				/* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};
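
/* load_btf() below assembles a raw BTF image with the layout
 * [struct btf_header][btf_raw_types][btf_str_sec]; since the string
 * section immediately follows the type section, .str_off equals
 * .type_len in the header.
 */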
static int load_btf(void)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	void *ptr, *raw_btf;
	int btf_fd;

	ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
			       sizeof(btf_str_sec));
	if (!raw_btf)
		return -1;

	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, btf_raw_types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, btf_str_sec, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
	free(raw_btf);
	if (btf_fd < 0)
		return -1;
	return btf_fd;
}
static int create_map_spin_lock(void)
{
	struct bpf_create_map_attr attr = {
		.name = "test_map",
		.map_type = BPF_MAP_TYPE_ARRAY,
		.key_size = 4,
		.value_size = 8,
		.max_entries = 1,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	};
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	attr.btf_fd = btf_fd;
	fd = bpf_create_map_xattr(&attr);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}
static int create_sk_storage_map(void)
{
	struct bpf_create_map_attr attr = {
		.name = "test_map",
		.map_type = BPF_MAP_TYPE_SK_STORAGE,
		.key_size = 4,
		.value_size = 8,
		.max_entries = 0,
		.map_flags = BPF_F_NO_PREALLOC,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	};
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	attr.btf_fd = btf_fd;
	fd = bpf_create_map_xattr(&attr);
	close(attr.btf_fd);
	if (fd < 0)
		printf("Failed to create sk_storage_map\n");
	return fd;
}
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;
	int *fixup_sk_storage_map = test->fixup_sk_storage_map;

	if (test->fill_helper) {
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * for verifier and not do a runtime lookup, so the only thing
	 * that really matters is value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}
	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}
	if (*fixup_map_array_small) {
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}
	if (*fixup_sk_storage_map) {
		map_fds[17] = create_sk_storage_map();
		do {
			prog[*fixup_sk_storage_map].imm = map_fds[17];
			fixup_sk_storage_map++;
		} while (*fixup_sk_storage_map);
	}
}
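
/* Worked example of the fixup mechanism above (hypothetical test entry):
 * if insn 3 of a test is BPF_LD_MAP_FD(BPF_REG_1, 0) and the entry sets
 * .fixup_map_hash_8b = { 3 }, the fixup_map_hash_8b loop rewrites
 * prog[3].imm with the fd of the freshly created 8-byte hash map. Note
 * that insn index 0 cannot be fixed up, since a zero entry terminates
 * each fixup array.
 */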
static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	uint32_t retval;
	int err;

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
				tmp, &size_tmp, &retval, NULL);
	if (unpriv)
		set_admin(false);
	if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
		printf("Unexpected bpf_prog_test_run error ");
		return err;
	}
	if (!err && retval != expected_val &&
	    expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", retval, expected_val);
		return 1;
	}

	return 0;
}
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	struct bpf_load_program_attr attr;
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = BPF_F_TEST_RND_HI32;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = prog_type;
	attr.expected_attach_type = test->expected_attach_type;
	attr.insns = prog;
	attr.insns_cnt = prog_len;
	attr.license = "GPL";
	attr.log_level = 4;
	attr.prog_flags = pflags;

	fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
	if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!expected_err || !strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}
static void get_unpriv_disabled(void)
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}
static bool test_as_unpriv(struct bpf_test *test)
{
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root are
		 * skipped right away.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}
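
/* Usage sketch, following the argv handling in main() above:
 *
 *   ./test_verifier          # run every test
 *   ./test_verifier 47       # run only test #47
 *   ./test_verifier 5 10     # run tests #5 through #10 inclusive
 */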