/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

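/* Implementation of the BPF array map types: BPF_MAP_TYPE_ARRAY,
 * BPF_MAP_TYPE_PERCPU_ARRAY and the fd-based arrays (PROG_ARRAY,
 * PERF_EVENT_ARRAY and, under CONFIG_CGROUPS, CGROUP_ARRAY). All of
 * them pre-allocate every element at map creation time and use a
 * fixed 4-byte index as the key.
 */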
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

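/* Allocate one 8-byte aligned per-cpu region for every array element;
 * on allocation failure all regions allocated so far are freed again.
 */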
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

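/* Validate the attributes passed in from the BPF_MAP_CREATE command and
 * allocate the map header plus all elements in one zeroed area. For
 * unprivileged users max_entries is rounded up to a power of two so that
 * lookups can be masked with index_mask, which keeps CPU speculation
 * within the bounds of the array.
 */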
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;
	int ret;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu &&
	    (elem_size > PCPU_MIN_UNIT_SIZE ||
	     bpf_array_alloc_percpu(array))) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

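/* Usage sketch (illustrative, not part of this file): an eBPF program
 * reads an ARRAY map through the bpf_map_lookup_elem() helper, which
 * ends up in array_map_lookup_elem() below; "my_array" and the value
 * type are hypothetical:
 *
 *	u32 key = 0;
 *	long *value = bpf_map_lookup_elem(&my_array, &key);
 *	if (value)
 *		__sync_fetch_and_add(value, 1);
 */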
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

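/* The per-cpu variant returns a pointer into the calling CPU's copy of
 * the element, so it is only meaningful from program context.
 */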
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

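/* Syscall-side lookup for per-cpu arrays: gather the element from every
 * possible CPU into one flat buffer of
 * round_up(value_size, 8) * num_possible_cpus() bytes.
 */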
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

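/* Iteration helper for the BPF_MAP_GET_NEXT_KEY command: a NULL or
 * out-of-range key restarts the walk at index 0, and the last valid
 * index returns -ENOENT to terminate it.
 */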
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

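/* Syscall-side update for per-cpu arrays: scatter one flat buffer, laid
 * out as in bpf_percpu_array_copy(), to every CPU's copy of the element.
 */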
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

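/* Usage sketch (illustrative, not part of this file): from user space an
 * ARRAY map is created with the bpf(2) syscall; key_size must be 4 and
 * map_flags 0, as enforced by array_map_alloc() above. The value_size
 * and max_entries below are hypothetical:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */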
static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __read_mostly = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

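/* Program arrays back the bpf_tail_call() helper. prog_fd_array_get_ptr()
 * takes a reference on the program behind the given fd and uses
 * bpf_prog_array_compatible() to ensure that only programs of a matching
 * type can be stored in the map.
 */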
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);

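/* Perf event array entries pin the perf event's struct file for the
 * lifetime of the entry and are released via call_rcu(), so programs
 * still running under rcu_read_lock() can safely dereference them.
 */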
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);

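/* Cgroup arrays store cgroup references resolved from file descriptors;
 * they are consumed by helpers such as bpf_skb_under_cgroup() to test
 * cgroup membership from programs.
 */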
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static const struct bpf_map_ops cgroup_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __read_mostly = {
	.ops = &cgroup_array_ops,
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
	bpf_register_map_type(&cgroup_array_type);
	return 0;
}
late_initcall(register_cgroup_array_map);
#endif