// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

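/* Note on the "8" alignment passed to bpf_map_alloc_percpu() above: elem_size
 * is already rounded up to a multiple of 8 in array_map_alloc(), and the
 * syscall-side copy helpers (bpf_long_memcpy()) move data in u64-sized
 * chunks, so 8-byte aligned per-CPU slots are sufficient here.
 */
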
/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

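/* For illustration only, a minimal union bpf_attr that passes the checks
 * above (field names are the real bpf_attr fields, the values are just an
 * example):
 *
 *	.map_type    = BPF_MAP_TYPE_ARRAY,
 *	.key_size    = 4,	// the key is always a u32 index
 *	.value_size  = 64,	// anything in (0, KMALLOC_MAX_SIZE]
 *	.max_entries = 256,
 *	.map_flags   = 0,	// or e.g. BPF_F_MMAPABLE for plain arrays
 */
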
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

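/* Rough layout produced above for BPF_F_MMAPABLE (sketch): the vmalloc'ed
 * region starts at array_map_vmalloc_addr(array); struct bpf_array is placed
 * inside it so that array->value falls exactly on the next page boundary,
 * and PAGE_ALIGN(max_entries * elem_size) bytes of element data follow. This
 * is what lets array_map_mmap() expose only the value pages to user space.
 */
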
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

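/* The "& array->index_mask" above is the Spectre v1 mitigation: when
 * bypass_spec_v1 is false, max_entries was rounded up to a power of two at
 * allocation time, so e.g. a map created with max_entries = 5 ends up with
 * index_mask = 0x7, and a mispredicted out-of-bounds index can only ever
 * speculatively touch the map's own, allocated, elements.
 */
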
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

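/* The instructions emitted above are roughly equivalent to the following C
 * (illustrative; r1 carries the map pointer, r2 a pointer to the u32 key):
 *
 *	index = *(u32 *)key;
 *	if (index >= max_entries)
 *		return NULL;
 *	if (!bypass_spec_v1)
 *		index &= array->index_mask;
 *	return array->value + index * elem_size;
 *
 * elem_size is a constant at generation time, so the multiply is emitted as
 * a shift whenever it is a power of two.
 */
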
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

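/* The 'value' buffer above is the one user space hands to
 * BPF_MAP_LOOKUP_ELEM for a per-CPU array; it has to hold at least
 * round_up(value_size, 8) * num_possible_cpus() bytes, one rounded-up slot
 * per possible CPU, laid out in for_each_possible_cpu() order.
 */
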
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

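/* From a BPF program this path is reached through the bpf_map_update_elem()
 * helper, e.g. (illustrative; "my_array" and "struct my_val" are made up):
 *
 *	u32 idx = 0;
 *	struct my_val v = { .counter = 1 };
 *	bpf_map_update_elem(&my_array, &idx, &v, BPF_ANY);
 *
 * BPF_NOEXIST always fails for arrays because every slot already exists.
 */
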
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

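/* User-space side (illustrative): for a BPF_F_MMAPABLE array the value area
 * can be mapped directly instead of going through lookup/update syscalls:
 *
 *	size_t len = max_entries * round_up(value_size, 8);
 *	void *vals = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  map_fd, 0);
 *
 * The size check above rejects mappings that would extend past the
 * page-aligned value area.
 */
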
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

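/* With BPF_F_INNER_MAP the max_entries of an inserted inner map may differ
 * from the template map: array_map_gen_lookup() refuses to inline lookups
 * for such maps (see the -EOPNOTSUPP above), so element accesses go through
 * the generic, bounds-checked array_map_lookup_elem() path instead of code
 * specialized on a fixed max_entries.
 */
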
struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

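/* These seq_file hooks back the bpf_iter "bpf_map_elem" target: an iterator
 * program attached to an array map is invoked once per element, with ctx.key
 * pointing at the current index and ctx.value at the element (or at the
 * gathered per-CPU copy for per-CPU arrays), and once more at the end with a
 * NULL element.
 */
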
static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also programs reaching refcount of zero while patching
			 *    is in progress is okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish the execution of program
				 * so that it will not be possible to expose them
				 * to invalid nop, stack unwind, nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP, old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;

	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

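/* The actual clearing runs from the workqueue: prog_array_map_clear() only
 * pins the map with bpf_map_inc() and schedules aux->work, and
 * prog_array_map_clear_deferred() drops that reference with bpf_map_put()
 * once every slot has been released.
 */
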
static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

*map
)
1027 struct prog_poke_elem
*elem
, *tmp
;
1028 struct bpf_array_aux
*aux
;
1030 aux
= container_of(map
, struct bpf_array
, map
)->aux
;
1031 list_for_each_entry_safe(elem
, tmp
, &aux
->poke_progs
, list
) {
1032 list_del_init(&elem
->list
);
1036 fd_array_map_free(map
);
/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

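/* Without BPF_F_PRESERVE_ELEMS, perf event entries are dropped as soon as
 * the map_file that inserted them is released (perf_event_fd_array_release()
 * above); with the flag they stay referenced until the map itself goes away,
 * which is why the free path has to clear them explicitly.
 */
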
static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &perf_event_array_map_btf_id,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put free cgrp after a rcu grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &cgroup_array_map_btf_id,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

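/* Compared to array_map_gen_lookup(), the sequence above additionally
 * dereferences the stored slot (the BPF_LDX_MEM(BPF_DW, ...) step) to yield
 * the inner map pointer and turns an empty slot into a NULL return via the
 * BPF_JEQ check, mirroring array_of_map_lookup_elem().
 */
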
static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_of_maps_map_btf_id,
};