// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
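
/*
 * g_map_type and g_line are plain globals so that, presumably, the
 * userspace side of the test can read them back after a run and report
 * which map type and which source line failed a check.
 */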
#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})

#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})
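
/*
 * Partial mirrors of kernel-internal structures. preserve_access_index
 * makes every field access CO-RE-relocatable, so libbpf can adjust the
 * offsets to match the struct layout of the running kernel.
 */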
struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}
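
/* Minimal stand-in for the kernel's atomic_t, used to read bpf_htab.count. */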
typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");
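
/*
 * check_hash() reads hash-table internals directly through the map
 * pointer. The elem_size == 64 check is a hard-coded snapshot of the
 * kernel's internal element size for a 4-byte key and 4-byte value, so
 * it is tied to the kernel the test was written against.
 */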
static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");
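
/*
 * The lookup loop below is bounded by both max_entries (read through
 * the map pointer) and the LOOP_BOUND constant so the verifier can
 * prove termination. The _Static_assert at the top guarantees that
 * MAX_ENTRIES, not LOOP_BOUND, is the binding limit, which is why
 * n_lookups must come out equal to MAX_ENTRIES.
 */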
static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

	VERIFY(array->elem_size == 8);

	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}
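
/*
 * Map-in-map tests: inner_map provides the inner map installed in both
 * outer maps' initializers. The array-of-maps declares its inner-map
 * template as an anonymous struct, while the hash-of-maps reuses
 * struct inner_map directly.
 */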
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;

	VERIFY(check_default(&array_of_maps->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;

	VERIFY(check_default(&hash_of_maps->map, map));

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}
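
/*
 * The two cgroup storage maps below (regular and percpu) are declared
 * without a max_entries attribute, so check() expects 0 for them.
 */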
struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}
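
/* Queue and stack maps have no key, hence the key_size of 0 below. */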
struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));

	return 1;
}
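
/*
 * Entry point, attached as a cgroup egress program by the test harness.
 * VERIFY_TYPE() records the map type under test before running each
 * check, so a failure can be reported with both the map type and the
 * failing source line.
 */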
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";