/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array *array;
	u32 elem_size, array_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	/* check round_up into zero and u32 overflow */
	if (elem_size == 0 ||
	    attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
		return ERR_PTR(-ENOMEM);

	array_size = sizeof(*array) + attr->max_entries * elem_size;

	/* allocate all map elements and zero-initialize them */
	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
	if (!array) {
		array = vzalloc(array_size);
		if (!array)
			return ERR_PTR(-ENOMEM);
	}

	/* copy mandatory map attributes */
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
	array->elem_size = elem_size;

	return &array->map;
}
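
/* Worked example of the sizing above (added commentary, illustrative only):
 * with value_size = 6 and max_entries = 1000, elem_size is rounded up to 8,
 * so array_size = sizeof(struct bpf_array) + 1000 * 8 bytes, and map.pages
 * charges the allocation in whole pages. The overflow check keeps
 * max_entries * elem_size from wrapping a u32 before the multiply happens.
 */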

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return NULL;

	return array->value + array->elem_size * index;
}
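
/* Note (added commentary): lookup returns a pointer directly into the map's
 * value area rather than a copy, so an eBPF program that calls
 * bpf_map_lookup_elem() on an array map can read and write the element in
 * place after the mandatory NULL check.
 */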

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
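
/* Usage sketch (added, hypothetical userspace code): keys can be walked with
 * the BPF_MAP_GET_NEXT_KEY command until -ENOENT, e.g. via a libbpf-style
 * wrapper bpf_map_get_next_key(fd, &key, &next_key):
 *
 *	__u32 key = -1, next_key;
 *	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
 *		bpf_map_lookup_elem(fd, &next_key, value);
 *		key = next_key;
 *	}
 *
 * An out-of-range start key (such as -1 here) restarts iteration at index 0.
 */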

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (map_flags > BPF_EXIST)
		/* unknown flags */
		return -EINVAL;

	if (index >= array->map.max_entries)
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (map_flags == BPF_NOEXIST)
		/* all elements already exist */
		return -EEXIST;

	memcpy(array->value + array->elem_size * index, value, map->value_size);
	return 0;
}
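
/* Note (added commentary): the flag check above relies on the numeric
 * ordering of the update flags in the UAPI (BPF_ANY = 0, BPF_NOEXIST = 1,
 * BPF_EXIST = 2), so "map_flags > BPF_EXIST" rejects any undefined flag
 * value. Since every array slot exists from allocation time, BPF_NOEXIST can
 * never succeed, while BPF_ANY and BPF_EXIST both overwrite in place.
 */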

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	kvfree(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	return 0;
}
late_initcall(register_array_map);

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);
	kvfree(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
static int fd_array_map_update_elem(struct bpf_map *map, void *key,
				    void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
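
/* Note (added commentary): xchg() swaps the new pointer into the slot
 * atomically, so a concurrent lookup from an eBPF program sees either the
 * old or the new object but never a torn value; the previous object, if any,
 * is only released through ->map_fd_put_ptr() after the swap.
 */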

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);
	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}
	return prog;
}
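
/* Usage sketch (added, hypothetical program-side code): a prog array is what
 * the bpf_tail_call() helper indexes into, e.g.
 *
 *	bpf_tail_call(ctx, &prog_array_map, index);
 *
 * Execution falls through past the call only if the slot is empty or the
 * tail call fails. bpf_prog_array_compatible() above enforces that every
 * program stored in one map shares the same program type.
 */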

static void prog_fd_array_put_ptr(void *ptr)
{
	struct bpf_prog *prog = ptr;

	bpf_prog_put_rcu(prog);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_update_elem = fd_array_map_update_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);

static void perf_event_array_map_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
{
	struct perf_event *event;
	const struct perf_event_attr *attr;
	struct file *file;

	file = perf_event_get(fd);
	if (IS_ERR(file))
		return file;

	event = file->private_data;

	attr = perf_event_attrs(event);
	if (IS_ERR(attr))
		goto err;

	if (attr->type == PERF_TYPE_RAW)
		return file;

	if (attr->type == PERF_TYPE_HARDWARE)
		return file;

	if (attr->type == PERF_TYPE_SOFTWARE &&
	    attr->config == PERF_COUNT_SW_BPF_OUTPUT)
		return file;

err:
	fput(file);
	return ERR_PTR(-EINVAL);
}
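
/* Note (added commentary): only raw, hardware, and BPF_OUTPUT software
 * events are accepted here; the stored struct file keeps the perf event
 * alive so that helpers such as bpf_perf_event_read() and
 * bpf_perf_event_output() can use the map slot from eBPF programs.
 */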

static void perf_event_fd_array_put_ptr(void *ptr)
{
	fput((struct file *)ptr);
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = perf_event_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_update_elem = fd_array_map_update_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);