// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"
#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}
/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & BPF_F_MMAPABLE)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
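/* Illustrative only: a minimal user-space sketch of attributes that pass the
 * checks above; the concrete sizes are assumptions for the example. Only
 * key_size == 4 is mandatory for array maps.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,			// keys are always a u32 index
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	// a negative map_fd with errno EINVAL/E2BIG mirrors the checks above
 */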
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
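/* Worked example of the sizing logic above: for attr->max_entries == 5,
 * fls_long(5 - 1) == 3, so mask64 becomes (1ULL << 3) - 1 == 7 and
 * index_mask == 7. For an unprivileged caller, max_entries is then rounded up
 * to index_mask + 1 == 8, so the speculation-safe "index & index_mask" used in
 * the lookup paths can never reach past the allocated elements.
 */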
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
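/* Illustrative only: the typical program-side path into this lookup, using
 * libbpf map definition macros (all names are assumptions):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counters SEC(".maps");
 *
 *	__u32 idx = 0;
 *	__u64 *val = bpf_map_lookup_elem(&counters, &idx);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 */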
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}
static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
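/* Context note, illustrative only: single-element array maps back BPF global
 * data (.data/.bss/.rodata). A program-side global such as
 *
 *	static volatile __u64 my_counter;	// placed in the .bss array map by the loader
 *
 * is accessed through direct value loads; the two helpers above translate
 * between the patched immediate address and the (map, offset) pair. The
 * variable name is an assumption for the example.
 */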
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
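/* Rough sketch of what the emitted sequence computes, written as C-like
 * pseudocode (r0 = ret, r1 = map_ptr, r2 = pointer to the key):
 *
 *	r1 += offsetof(struct bpf_array, value);
 *	r0 = *(u32 *)(r2 + 0);			// load the index
 *	if (r0 >= max_entries) {
 *		r0 = 0;				// out of range: return NULL
 *	} else {
 *		r0 &= index_mask;		// unprivileged maps only
 *		r0 *= elem_size;		// or <<= ilog2(elem_size)
 *		r0 += r1;			// address of the element
 *	}
 */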
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
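/* Illustrative only: from user space, looking up a per-CPU array element needs
 * a buffer with room for every possible CPU, matching the copy loop above
 * (libbpf helper names, sizes are assumptions for an 8-byte value):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *values = calloc(ncpus, sizeof(__u64));	// value_size rounded to 8
 *	__u32 key = 0;
 *	if (!bpf_map_lookup_elem(map_fd, &key, values))
 *		;	// values[cpu] now holds the element as seen by each CPU
 */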
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
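/* Illustrative only: user-space iteration driven by this callback. A NULL key
 * (treated as an out-of-range index above) restarts the walk at index 0:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);	// next becomes 0
 *	while (!err) {
 *		key = next;
 *		// ... process 'key' ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next); // ENOENT ends the loop
 *	}
 */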
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}
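/* Illustrative only: the flag semantics enforced above, seen from the syscall
 * side (map_fd and the values are assumptions):
 *
 *	__u32 key = 3;
 *	__u64 val = 42;
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);	// overwrites slot 3
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);	// fails with EEXIST: slots always exist
 *	// BPF_F_LOCK is accepted only if the value contains a struct bpf_spin_lock
 */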
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks are possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
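/* Illustrative only: the matching user-space update supplies one value slot per
 * possible CPU, which the copy loop above distributes (libbpf helper names):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *values = calloc(ncpus, sizeof(__u64));	// value_size rounded to 8
 *	__u32 key = 0;
 *	bpf_map_update_elem(map_fd, &key, values, BPF_ANY);	// zeroes the slot on every CPU
 */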
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}
static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}
static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}
static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}
static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff);
}
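/* Illustrative only: what BPF_F_MMAPABLE enables for user space. With the flag
 * set at map creation, the value area can be mapped and accessed without
 * further syscalls. The length math is an assumption based on the sizing done
 * in array_map_alloc() (per-element size rounded up to 8 bytes):
 *
 *	size_t len = max_entries * 8;		// for an 8-byte value_size
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   map_fd, 0);
 *	if (vals != MAP_FAILED)
 *		vals[3] = 42;			// writes element 3 directly
 */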
const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
};
const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}
static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}
/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}
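/* Illustrative only: this update path is how user space populates a prog array
 * used for tail calls (map and program names are assumptions):
 *
 *	__u32 slot = 0;
 *	__u32 prog_fd_val = prog_fd;		// value_size == sizeof(u32): an fd
 *	bpf_map_update_elem(jmp_table_fd, &slot, &prog_fd_val, BPF_ANY);
 *
 * A BPF program then jumps through the slot with
 * bpf_tail_call(ctx, &jmp_table, 0), and prog_array_map_poke_run() below keeps
 * any JITed direct-jump sites in sync with such updates.
 */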
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}
static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}
static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}
/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}
struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};
static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}
static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}
static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->ip_stable is not
			 *    active yet. The JIT will do the final fixup before
			 *    setting it stable. The various poke->ip_stable are
			 *    successively activated, so tail call updates can
			 *    arrive from here while JIT is still finishing its
			 *    final fixup for non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of an RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    the RCU grace period already passed, we simply skip them.
			 * 4) Programs reaching a refcount of zero while patching
			 *    is in progress are also okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly the kallsyms entry of the program
			 *    gets evicted, we just skip the rest, which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->ip_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
						 old ? (u8 *)old->bpf_func +
						 poke->adj_off : NULL,
						 new ? (u8 *)new->bpf_func +
						 poke->adj_off : NULL);
			BUG_ON(ret < 0 && ret != -EINVAL);
		}
	}
}
static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}
static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}
static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}
static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}
static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}
static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}
static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}
static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}
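/* Illustrative only: the common consumer of a perf event array. User space
 * opens one perf event per CPU and stores the fds in the map; a BPF program
 * then emits samples into it (names follow libbpf conventions and are
 * assumptions):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} events SEC(".maps");
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 */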
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}
static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}
static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}
const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif /* CONFIG_CGROUPS */
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}
static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}
static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
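/* Illustrative only: an array-of-maps as used from a BPF program. The outer
 * lookup (inlined by the code above) yields the inner map pointer, which is
 * then passed to a second lookup. Declarations follow libbpf conventions and
 * all names are assumptions:
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	};
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 8);
 *		__type(key, __u32);
 *		__array(values, struct inner);
 *	} outer SEC(".maps");
 *
 *	__u32 k = 0;
 *	void *inner_map = bpf_map_lookup_elem(&outer, &k);
 *	if (inner_map) {
 *		__u64 *v = bpf_map_lookup_elem(inner_map, &k);
 *		// use *v
 *	}
 */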
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};