/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
        }

        return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
        bool unpriv = !capable(CAP_SYS_ADMIN);
        struct bpf_array *array;
        u64 array_size, mask64;

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 ||
            attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
            (percpu && numa_node != NUMA_NO_NODE))
                return ERR_PTR(-EINVAL);

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return ERR_PTR(-E2BIG);

        elem_size = round_up(attr->value_size, 8);

        max_entries = attr->max_entries;

        /* On 32 bit archs roundup_pow_of_two() with max_entries that has
         * upper most bit set in u32 space is undefined behavior due to
         * resulting 1U << 32, so do it manually here in u64 space.
         */
        mask64 = fls_long(max_entries - 1);
        mask64 = 1ULL << mask64;
        mask64 -= 1;

        index_mask = mask64;
        if (unpriv) {
                /* round up array size to nearest power of 2,
                 * since cpu will speculate within index_mask limits
                 */
                max_entries = index_mask + 1;
                /* Check for overflows. */
                if (max_entries < attr->max_entries)
                        return ERR_PTR(-E2BIG);
        }

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) max_entries * sizeof(void *);
        else
                array_size += (u64) max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);
        array->index_mask = index_mask;
        array->map.unpriv_array = unpriv;

        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
        array->map.map_flags = attr->map_flags;
        array->map.numa_node = numa_node;
        array->elem_size = elem_size;

        if (!percpu)
                goto out;

        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

        if (array_size >= U32_MAX - PAGE_SIZE ||
            bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
out:
        array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

        return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
        }

        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}

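/* Editor's illustrative paraphrase (not part of the original source): the
 * instruction sequence emitted above behaves roughly like
 *
 *	void *elem = NULL;
 *	u32 index = *(u32 *)key;
 *
 *	if (index < map->max_entries) {
 *		if (map->unpriv_array)
 *			index &= array->index_mask;	// bound speculation
 *		elem = array->value + elem_size * index;
 *	}
 *	return elem;
 *
 * i.e. the same logic as array_map_lookup_elem(), inlined into the calling
 * program by the verifier instead of going through a helper call.
 */
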
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

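/* Editor's note (not from the original source): for example, with
 * value_size == 12 the loop above copies round_up(12, 8) == 16 bytes per
 * possible CPU, so the caller's buffer must hold
 * 16 * num_possible_cpus() bytes; the 4 trailing bytes of each per-CPU slot
 * are the zero-filled padding described in the comment above, never
 * uninitialized kernel memory.
 */
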
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}

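/* Editor's note (not from the original source): user space typically walks
 * an array map with repeated BPF_MAP_GET_NEXT_KEY calls.  A NULL key (or any
 * out-of-range index) restarts the walk at index 0, each following call
 * yields index + 1, and -ENOENT at index max_entries - 1 ends the loop.
 */
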
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        else
                memcpy(array->value +
                       array->elem_size * (index & array->index_mask),
                       value, map->value_size);
        return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* the user space will provide round_up(value_size, 8) bytes that
         * will be copied into per-cpu area. bpf programs can only access
         * value_size of it. During lookup the same extra bytes will be
         * returned or zeros which were zero-filled by percpu_alloc,
         * so no kernel data leaks possible
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (can be more than one that used this map) were
         * disconnected from events. Wait for outstanding programs to complete
         * and free the array
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
};

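/* Editor's usage sketch (not part of the original source): from user space,
 * an array map backed by the ops above is created via the bpf(2) syscall,
 * for example (struct my_value is a hypothetical value type):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),	// must be 4, see array_map_alloc()
 *		.value_size  = sizeof(struct my_value),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * Subsequent BPF_MAP_LOOKUP_ELEM / BPF_MAP_UPDATE_ELEM commands then reach
 * array_map_lookup_elem() and array_map_update_elem() through these ops.
 */
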
const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
        return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees cgrp after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = fd_array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}

static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed by syscall which
         * is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        }
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);

        return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = array_of_map_gen_lookup,
};