/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

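/* Illustrative sketch (added for clarity, not part of the original file):
 * from user space the checks above apply to the attributes passed to
 * BPF_MAP_CREATE; a plain array map must use a 4-byte key, e.g.:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,	// anything else fails with -EINVAL
 *		.value_size  = 64,	// > 0 and <= KMALLOC_MAX_SIZE
 *		.max_entries = 256,	// > 0
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
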
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

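/* Worked example (added for clarity, not in the original file): with
 * attr->max_entries == 5 and an unprivileged caller,
 *
 *	fls_long(5 - 1) == 3, so mask64 = (1ULL << 3) - 1 = 7,
 *	index_mask  = 7,
 *	max_entries = index_mask + 1 = 8,
 *
 * i.e. the backing storage is rounded up to the next power of two so that a
 * speculated access masked with index_mask can never leave the allocation.
 */
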
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

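/* Example of the address computation above (illustrative only): with
 * elem_size == 64 and index_mask == 255, index 10 resolves to
 *
 *	array->value + 64 * (10 & 255) == array->value + 640
 *
 * The AND with index_mask is a no-op for in-bounds indices; it only bounds
 * speculative accesses on unprivileged maps.
 */
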
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

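/* Sketch of the inlined sequence emitted above (added for clarity; register
 * names follow the const ints in the function):
 *
 *	r1 += offsetof(struct bpf_array, value)	// map_ptr -> first element
 *	r0  = *(u32 *)(r2 + 0)			// load index from key
 *	if r0 >= max_entries goto miss		// bounds check
 *	r0 &= index_mask			// unpriv_array only
 *	r0 <<= ilog2(elem_size)			// or: r0 *= elem_size
 *	r0 += r1				// element address
 *	goto done
 * miss:	r0 = 0				// NULL on out-of-range index
 * done:
 */
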
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

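/* User-space view (illustrative sketch, not part of the original file): a
 * lookup on a per-cpu array returns one rounded-up value per possible CPU,
 * laid out back to back, so the destination buffer must be sized for that:
 *
 *	int ncpus  = libbpf_num_possible_cpus();	// or parse cpu/possible
 *	size_t vsz = (value_size + 7) & ~7UL;		// round_up(value_size, 8)
 *	void *buf  = calloc(ncpus, vsz);
 *	// BPF_MAP_LOOKUP_ELEM then fills buf[cpu * vsz] for each CPU
 */
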
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

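/* Iteration sketch (illustrative, using libbpf's wrapper): passing a NULL
 * key (or any out-of-range key) restarts the walk at index 0, and -ENOENT
 * from the last index terminates it:
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     key = next, err = bpf_map_get_next_key(map_fd, &key, &next))
 *		handle_index(next);	// handle_index() is a placeholder
 */
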
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};

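/* Usage sketch (illustrative, not part of the original file): prog arrays
 * are consumed by bpf_tail_call() from BPF program context, and a miss
 * (empty slot or out-of-range index) simply falls through:
 *
 *	// BPF program side, assuming a BPF_MAP_TYPE_PROG_ARRAY named jmp_table
 *	bpf_tail_call(ctx, &jmp_table, idx);
 *	// execution only reaches this point if the tail call did not happen
 */
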
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};
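
/* Usage sketch (illustrative, not part of the original file): an array of
 * maps stores inner map file descriptors on update and hands back the inner
 * map pointer on lookup from BPF context:
 *
 *	// user space: attr.inner_map_fd must name a template map at create
 *	// time; later updates store other maps of the same type by fd
 *	__u32 idx = 0;
 *	int inner_fd;			// fd of an already-created inner map
 *	bpf_map_update_elem(outer_fd, &idx, &inner_fd, BPF_ANY);
 *
 *	// BPF program side: bpf_map_lookup_elem() on the outer map returns
 *	// the inner map, which can then be looked up in turn
 */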