/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"
#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}
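
/* Allocate one 8-byte aligned per-cpu region per array element, unwinding
 * all earlier allocations if any single allocation fails.
 */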
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}
/* Called from syscall */
static int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
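
/* A struct bpf_array is allocated with its element storage inline: the
 * header is followed either by max_entries * elem_size bytes of values,
 * or, for per-cpu arrays, by max_entries per-cpu pointers.
 */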
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	struct bpf_array *array;
	u64 array_size, mask64;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;
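	/* For example, max_entries = 5 gives fls_long(4) = 3, so
	 * mask64 = (1ULL << 3) - 1 = 7 and an unprivileged array is
	 * rounded up to 8 slots below.
	 */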
	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
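
/* The sequence emitted above is roughly:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)
 *	if r0 >= max_entries goto miss
 *	r0 &= index_mask		(unprivileged maps only)
 *	r0 <<= ilog2(elem_size)		(or r0 *= elem_size)
 *	r0 += r1
 *	goto done
 * miss:
 *	r0 = 0
 * done:
 */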
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
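
/* Called from syscall: copies each possible CPU's slot for @key into
 * @value, which must hold round_up(map->value_size, 8) *
 * num_possible_cpus() bytes.
 */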
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}
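
/* Called from syscall only: @value supplies one round_up(value_size, 8)
 * sized image per possible CPU, each copied into that CPU's slot for @key.
 */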
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}
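
/* A minimal user-space sketch (not part of this file) of creating this map
 * type through the bpf(2) syscall; field and command names come from
 * <linux/bpf.h>:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,	// keys must be a u32 index
 *		.value_size  = 8,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */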
const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};
const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return array_map_alloc_check(attr);
}
static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}
/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}
static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}
static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}
/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}
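
/* Program arrays back bpf_tail_call(): a running program jumps into the
 * program stored at a given index, so stale entries must be dropped via
 * bpf_fd_array_map_clear() above when the map's last user goes away.
 */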
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}
static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}
static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
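
/* Validate that the perf event supports local reads before caching it:
 * perf_event_read_local() returning -EOPNOTSUPP means bpf programs could
 * never read this counter, so such events are rejected up front.
 */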
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}
static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}
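
/* When a map file descriptor is released, purge only the entries that were
 * installed through that file (ee->map_file == map_file), since the same
 * map can be shared by several map fds.
 */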
static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif
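
/* Array-of-maps: each element holds a pointer to another bpf map. Inner
 * maps are installed by fd from user space; inner_map_meta caches the
 * inner map's attributes so the verifier can type-check lookups without
 * pinning any particular inner map.
 */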
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}
static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}
static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
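
/* Compared to array_map_gen_lookup(), the sequence above adds one
 * BPF_LDX_MEM(BPF_DW, ...) to load the stored inner-map pointer plus a
 * BPF_JEQ null check on it, which is why the miss-path jump offsets are
 * larger (6/5 instead of 4/3).
 */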
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};