// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}
/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
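
/* A minimal userspace sketch of attributes that pass the checks above,
 * assuming libbpf's bpf_create_map() wrapper:
 *
 *	// key_size must be exactly 4 (a u32 index); value_size and
 *	// max_entries must be non-zero:
 *	int fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				sizeof(long), 256, 0);
 *
 * A key_size other than 4, a zero value_size, or an unknown bit in
 * map_flags makes BPF_MAP_CREATE fail with -EINVAL.
 */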
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
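
	/* Worked example of the mask computation above: for
	 * attr->max_entries == 5, fls_long(4) == 3, so
	 * mask64 == (1ULL << 3) - 1 == 7 and index_mask == 7. An
	 * unprivileged loader then gets the array sized to
	 * index_mask + 1 == 8 slots, so even a speculatively executed
	 * access with a masked index stays inside the allocation.
	 */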
	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
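
/* Roughly, the privileged-case sequence emitted above reads as the
 * following pseudo-assembly (a sketch; the verifier patches it inline
 * in place of the bpf_map_lookup_elem() helper call):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// index
 *	if r0 >= max_entries goto miss
 *	r0 <<= ilog2(elem_size)		// or r0 *= elem_size
 *	r0 += r1			// &array->value[index * elem_size]
 *	goto out
 * miss:
 *	r0 = 0				// NULL
 * out:
 *
 * The unprivileged case additionally ANDs the index with index_mask
 * after the bounds check to bound speculative execution.
 */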
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
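
/* From userspace, a lookup on a per-cpu array therefore returns one
 * value slot per possible CPU, each padded to a multiple of 8 bytes.
 * A minimal sketch, assuming libbpf and a hypothetical NCPUS bound:
 *
 *	__u32 key = 0;
 *	__u64 vals[NCPUS];	// round_up(value_size, 8) bytes per CPU
 *	bpf_map_lookup_elem(fd, &key, vals);
 *	// vals[cpu] now holds CPU 'cpu's copy of element 0
 */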
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
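
/* The -ENOENT on the last index is what terminates a userspace walk.
 * A typical iteration over all keys, sketched with libbpf:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(fd, NULL, &next);  // next == 0
 *	while (!err) {
 *		key = next;
 *		// ... look up and process 'key' ...
 *		err = bpf_map_get_next_key(fd, &key, &next);
 *	}
 */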
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}
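
/* Flag semantics in short: every index always exists, so BPF_NOEXIST
 * can never succeed, while BPF_ANY and BPF_EXIST both overwrite in
 * place. A userspace sketch, assuming libbpf:
 *
 *	__u32 key = 3;
 *	long val = 42;
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);	  // ok
 *	bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST); // -EEXIST
 */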
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}
static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}
static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
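
/* Example of what the key check accepts: BTF describing a plain
 * 'unsigned int' key encodes BTF_KIND_INT with
 * BTF_INT_BITS(int_data) == 32 and BTF_INT_OFFSET(int_data) == 0, so
 * it passes; a 64-bit integer key, or one with a non-zero bit offset,
 * is rejected with -EINVAL.
 */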
const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};
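
/* A prog array is typically consumed via bpf_tail_call() dispatch.
 * Program-side sketch (the map definition convention here is libbpf's
 * struct bpf_map_def, as commonly used with clang -target bpf):
 *
 *	struct bpf_map_def SEC("maps") jmp_table = {
 *		.type = BPF_MAP_TYPE_PROG_ARRAY,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u32),
 *		.max_entries = 8,
 *	};
 *
 *	SEC("socket")
 *	int dispatcher(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return 0;	// only reached if slot 0 is empty
 *	}
 *
 * Userspace fills the slots with program FDs through
 * bpf_fd_array_map_update_elem() above.
 */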
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};
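
/* Programs usually feed a perf event array through
 * bpf_perf_event_output(). Program-side sketch, where 'events' is a
 * hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots userspace
 * populated with perf event FDs:
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */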
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}
static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
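
/* Userspace sketch of wiring up an array-of-maps, assuming libbpf's
 * bpf_create_map() and bpf_create_map_in_map() wrappers:
 *
 *	int inner = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				   sizeof(long), 64, 0);
 *	int outer = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *					  NULL, sizeof(__u32), inner,
 *					  4, 0);
 *	__u32 key = 0;
 *	bpf_map_update_elem(outer, &key, &inner, BPF_ANY); // store inner FD
 */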
static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};