// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}
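
/* Each element gets its own per-CPU allocation of elem_size bytes
 * (value_size rounded up to 8) with 8-byte alignment.  Per-CPU areas come
 * back zero-filled, which bpf_percpu_array_copy() and
 * bpf_percpu_array_update() below rely on so that copying the rounded-up
 * size never leaks kernel data.
 */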

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
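
/* Worked example of the index_mask math above: with attr->max_entries == 5,
 * fls_long(4) == 3, so mask64 == (1ULL << 3) - 1 == 7 and index_mask == 7.
 * When Spectre v1 mitigation is active (!bypass_spec_v1), max_entries is
 * rounded up to index_mask + 1 == 8, so "index & index_mask" always lands
 * inside the allocation even under speculation.
 */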

static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
					u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
					u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
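
/* array_map_direct_value_addr() and array_map_direct_value_meta() back the
 * verifier's direct value access for single-entry arrays (as used for global
 * data sections): the former turns an offset into the absolute address of
 * array->value, the latter maps such an address back to an offset within the
 * single element.
 */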

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
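
/* The sequence emitted above is the inlined equivalent of:
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	if (!map->bypass_spec_v1)
 *		index &= array->index_mask;
 *	return array->value + elem_size * index;
 *
 * with the multiplication strength-reduced to a shift when elem_size is a
 * power of two.
 */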

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;

	if (!bpf_jit_supports_percpu_insn())
		return -EOPNOTSUPP;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));

	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
	}

	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	return insn - insn_buf;
}
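
/* The code above loads the element's per-CPU pointer from the pptrs[] table
 * (hence the fixed shift by 3: each entry is an 8-byte pointer) and then
 * converts it to the address on the current CPU with BPF_MOV64_PERCPU_REG,
 * mirroring what this_cpu_ptr() does in percpu_array_map_lookup_elem().
 */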

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
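
/* User space therefore sees a per-CPU array value as num_possible_cpus()
 * blocks of round_up(value_size, 8) bytes, concatenated in possible-CPU
 * order; e.g. value_size == 12 is copied out as 16 bytes per CPU.
 */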

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers_wq(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer and workqueue
	 * on uref dropping to zero.
	 */
	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
		for (i = 0; i < array->map.max_entries; i++) {
			if (btf_record_has_field(map->record, BPF_TIMER))
				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
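
/* A minimal user-space sketch of consuming a BPF_F_MMAPABLE array
 * (illustrative only, error handling omitted; 'map_fd' is assumed to be a
 * BPF_MAP_TYPE_ARRAY created with BPF_F_MMAPABLE):
 *
 *	size_t stride = round_up(value_size, 8);
 *	void *base = mmap(NULL, max_entries * stride,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
 *	// base aliases array->value; element i lives at base + i * stride
 */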

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}
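
/* BPF_F_INNER_MAP relaxes the max_entries comparison on purpose: with that
 * flag set array_map_gen_lookup() refuses to inline the lookup (see the
 * -EOPNOTSUPP check above), so no constant max_entries gets baked into the
 * program and inner maps of different sizes remain interchangeable.
 */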

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu *pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = (void __percpu *)(uintptr_t)v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}
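
/* BPF-side sketch of the callback contract enforced above (illustrative
 * only; names are hypothetical):
 *
 *	static long count_cb(struct bpf_map *map, u32 *key, u64 *val, void *ctx)
 *	{
 *		return *val == 0;	// 1 stops the iteration, 0 continues
 *	}
 *	...
 *	bpf_for_each_map_elem(&my_array, count_cb, &data, 0);
 */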

static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}
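
/* This mirrors the sizing logic in array_map_alloc(): per-CPU arrays charge
 * the pointer table plus one elem_size copy per possible CPU, while
 * BPF_F_MMAPABLE arrays are accounted page-aligned to match the vmalloc'ed
 * region actually backing them.
 */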

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers_wq,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_gen_lookup = percpu_array_map_gen_lookup,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}

static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
		return 0;
	} else {
		return -ENOENT;
	}
}
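
/* Both the update and delete paths above swap the slot with xchg() and, when
 * the map has a poke handler (tail call maps), publish the change to all
 * tracked JITed images via map->ops->map_poke_run() while holding
 * array->aux->poke_mutex, so the patched direct jumps and the slot contents
 * never go out of sync.
 */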

static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	return __fd_array_map_delete_elem(map, key, true);
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);
	bool is_extended;

	if (IS_ERR(prog))
		return prog;

	if (prog->type == BPF_PROG_TYPE_EXT ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&prog->aux->ext_mutex);
	is_extended = prog->aux->is_extended;
	if (!is_extended)
		prog->aux->prog_array_member_cnt++;
	mutex_unlock(&prog->aux->ext_mutex);
	if (is_extended) {
		/* Extended prog can not be tail callee. It's to prevent a
		 * potential infinite loop like:
		 * tail callee prog entry -> tail callee prog subprog ->
		 * freplace prog entry --tailcall-> tail callee prog entry.
		 */
		bpf_prog_put(prog);
		return ERR_PTR(-EBUSY);
	}

	return prog;
}

static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	struct bpf_prog *prog = ptr;

	mutex_lock(&prog->aux->ext_mutex);
	prog->aux->prog_array_member_cnt--;
	mutex_unlock(&prog->aux->ext_mutex);
	/* bpf_prog is freed after one RCU or tasks trace grace period */
	bpf_prog_put(prog);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		__fd_array_map_delete_elem(map, &i, need_defer);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
				      struct bpf_prog *new, struct bpf_prog *old)
{
	WARN_ON_ONCE(1);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) Also programs reaching refcount of zero while patching
			 *    is in progress is okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			bpf_arch_poke_desc_update(poke, new, old);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map, true);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_perf_event is freed after one RCU grace period */
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			__fd_array_map_delete_elem(map, &i, true);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* cgroup_put free cgrp after a rcu grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = array->elem_size;
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
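
/* Compared to array_map_gen_lookup(), the sequence above additionally
 * dereferences the slot (BPF_LDX_MEM(BPF_DW, ...)) to fetch the stored inner
 * map pointer and returns NULL when the slot is empty, i.e. it is the inlined
 * form of array_of_map_lookup_elem().
 */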

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};