// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
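
/* A BPF array map is a flat, preallocated array of fixed-size elements
 * indexed by a u32 key. Elements are never added or removed after map
 * creation; update overwrites a slot in place and delete is rejected.
 */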
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}
/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
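
/* The element area is laid out directly behind struct bpf_array: regular
 * arrays append max_entries * elem_size bytes of value storage, while
 * per-cpu arrays append one __percpu pointer per element instead.
 */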
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
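
/* Sizing example (illustrative): value_size = 12 rounds up to elem_size = 16.
 * For an unprivileged user asking for max_entries = 1000, index_mask becomes
 * 1023 and the array is sized for 1024 elements, so even a speculatively
 * out-of-bounds index masked by index_mask lands inside the allocation.
 */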
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
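
/* The "& array->index_mask" above is not redundant with the bounds check:
 * it clamps the index even on mispredicted (speculative) paths, so a
 * Spectre-v1 style gadget cannot read past the end of the element area.
 */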
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
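
/* Roughly, the sequence emitted above inlines:
 *
 *	index = *(u32 *)key;
 *	if (index >= max_entries)
 *		return NULL;
 *	return &array->value + elem_size * (index & index_mask);
 *
 * where the AND against index_mask is only emitted for unprivileged maps.
 */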
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
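
/* Illustrative user-space contract: a BPF_MAP_LOOKUP_ELEM syscall on a
 * per-cpu array must supply a buffer of num_possible_cpus() slots of
 * round_up(value_size, 8) bytes each, filled in for_each_possible_cpu()
 * order by the loop above.
 */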
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
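
/* Iteration contract: a NULL or out-of-range key restarts iteration at
 * index 0; asking for the successor of the last slot returns -ENOENT to
 * signal the end of the map.
 */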
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}
static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}
static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};
const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}
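
/* Elements of fd maps are pointers to kernel objects (programs, perf
 * events, cgroups, inner maps) installed by passing a file descriptor
 * through the syscall; direct value lookups from eBPF programs are
 * therefore rejected below.
 */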
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
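
/* xchg() swaps the slot atomically, so concurrent readers observe either
 * the old or the new object, never a torn pointer; the reference held by
 * the displaced object is then dropped via map_fd_put_ptr().
 */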
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
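
/* Freeing via call_rcu() defers the fput() of the perf file until after a
 * grace period, so eBPF programs still running under rcu_read_lock() can
 * safely finish using an entry they have already loaded from the array.
 */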
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}
static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}
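
/* map_release runs when a file referring to the map goes away: only the
 * entries that were installed through that particular map_file are purged,
 * so the same map shared via another file descriptor keeps its entries.
 */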
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
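
/* Illustrative use from a (hypothetical) eBPF program: a lookup on an
 * array-of-maps yields the inner struct bpf_map pointer, which the program
 * passes to a second bpf_map_lookup_elem() call to reach the inner map's
 * values. The verifier type-checks those inner lookups against
 * map->inner_map_meta captured at creation time.
 */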
static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
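
/* Compared with array_map_gen_lookup(), the sequence above additionally
 * loads the stored inner-map pointer (the BPF_LDX_MEM(BPF_DW, ...) step)
 * and NULL-checks it, which is why the out-of-bounds jump offsets are
 * 6/5 here instead of 4/3.
 */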
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};