/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
19 /* Called from syscall */
20 static struct bpf_map
*array_map_alloc(union bpf_attr
*attr
)
22 struct bpf_array
*array
;
23 u32 elem_size
, array_size
;
25 /* check sanity of attributes */
26 if (attr
->max_entries
== 0 || attr
->key_size
!= 4 ||
27 attr
->value_size
== 0)
28 return ERR_PTR(-EINVAL
);
30 elem_size
= round_up(attr
->value_size
, 8);
32 /* check round_up into zero and u32 overflow */
34 attr
->max_entries
> (U32_MAX
- sizeof(*array
)) / elem_size
)
35 return ERR_PTR(-ENOMEM
);
37 array_size
= sizeof(*array
) + attr
->max_entries
* elem_size
;
39 /* allocate all map elements and zero-initialize them */
40 array
= kzalloc(array_size
, GFP_USER
| __GFP_NOWARN
);
42 array
= vzalloc(array_size
);
44 return ERR_PTR(-ENOMEM
);
47 /* copy mandatory map attributes */
48 array
->map
.key_size
= attr
->key_size
;
49 array
->map
.value_size
= attr
->value_size
;
50 array
->map
.max_entries
= attr
->max_entries
;
52 array
->elem_size
= elem_size
;
57 /* Called from syscall or from eBPF program */
58 static void *array_map_lookup_elem(struct bpf_map
*map
, void *key
)
60 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
61 u32 index
= *(u32
*)key
;
63 if (index
>= array
->map
.max_entries
)
66 return array
->value
+ array
->elem_size
* index
;
69 /* Called from syscall */
70 static int array_map_get_next_key(struct bpf_map
*map
, void *key
, void *next_key
)
72 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
73 u32 index
= *(u32
*)key
;
74 u32
*next
= (u32
*)next_key
;
76 if (index
>= array
->map
.max_entries
) {
81 if (index
== array
->map
.max_entries
- 1)
88 /* Called from syscall or from eBPF program */
89 static int array_map_update_elem(struct bpf_map
*map
, void *key
, void *value
,
92 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
93 u32 index
= *(u32
*)key
;
95 if (map_flags
> BPF_EXIST
)
99 if (index
>= array
->map
.max_entries
)
100 /* all elements were pre-allocated, cannot insert a new one */
103 if (map_flags
== BPF_NOEXIST
)
104 /* all elements already exist */
107 memcpy(array
->value
+ array
->elem_size
* index
, value
, array
->elem_size
);
111 /* Called from syscall or from eBPF program */
112 static int array_map_delete_elem(struct bpf_map
*map
, void *key
)
117 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
118 static void array_map_free(struct bpf_map
*map
)
120 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
122 /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
123 * so the programs (can be more than one that used this map) were
124 * disconnected from events. Wait for outstanding programs to complete
132 static const struct bpf_map_ops array_ops
= {
133 .map_alloc
= array_map_alloc
,
134 .map_free
= array_map_free
,
135 .map_get_next_key
= array_map_get_next_key
,
136 .map_lookup_elem
= array_map_lookup_elem
,
137 .map_update_elem
= array_map_update_elem
,
138 .map_delete_elem
= array_map_delete_elem
,
141 static struct bpf_map_type_list array_type __read_mostly
= {
143 .type
= BPF_MAP_TYPE_ARRAY
,
146 static int __init
register_array_map(void)
148 bpf_register_map_type(&array_type
);
151 late_initcall(register_array_map
);
153 static struct bpf_map
*fd_array_map_alloc(union bpf_attr
*attr
)
155 /* only file descriptors can be stored in this type of map */
156 if (attr
->value_size
!= sizeof(u32
))
157 return ERR_PTR(-EINVAL
);
158 return array_map_alloc(attr
);
161 static void fd_array_map_free(struct bpf_map
*map
)
163 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
168 /* make sure it's empty */
169 for (i
= 0; i
< array
->map
.max_entries
; i
++)
170 BUG_ON(array
->ptrs
[i
] != NULL
);
174 static void *fd_array_map_lookup_elem(struct bpf_map
*map
, void *key
)
179 /* only called from syscall */
180 static int fd_array_map_update_elem(struct bpf_map
*map
, void *key
,
181 void *value
, u64 map_flags
)
183 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
184 void *new_ptr
, *old_ptr
;
185 u32 index
= *(u32
*)key
, ufd
;
187 if (map_flags
!= BPF_ANY
)
190 if (index
>= array
->map
.max_entries
)
194 new_ptr
= map
->ops
->map_fd_get_ptr(map
, ufd
);
196 return PTR_ERR(new_ptr
);
198 old_ptr
= xchg(array
->ptrs
+ index
, new_ptr
);
200 map
->ops
->map_fd_put_ptr(old_ptr
);
205 static int fd_array_map_delete_elem(struct bpf_map
*map
, void *key
)
207 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
209 u32 index
= *(u32
*)key
;
211 if (index
>= array
->map
.max_entries
)
214 old_ptr
= xchg(array
->ptrs
+ index
, NULL
);
216 map
->ops
->map_fd_put_ptr(old_ptr
);
223 static void *prog_fd_array_get_ptr(struct bpf_map
*map
, int fd
)
225 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
226 struct bpf_prog
*prog
= bpf_prog_get(fd
);
230 if (!bpf_prog_array_compatible(array
, prog
)) {
232 return ERR_PTR(-EINVAL
);
237 static void prog_fd_array_put_ptr(void *ptr
)
239 struct bpf_prog
*prog
= ptr
;
241 bpf_prog_put_rcu(prog
);
244 /* decrement refcnt of all bpf_progs that are stored in this map */
245 void bpf_fd_array_map_clear(struct bpf_map
*map
)
247 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
250 for (i
= 0; i
< array
->map
.max_entries
; i
++)
251 fd_array_map_delete_elem(map
, &i
);
254 static const struct bpf_map_ops prog_array_ops
= {
255 .map_alloc
= fd_array_map_alloc
,
256 .map_free
= fd_array_map_free
,
257 .map_get_next_key
= array_map_get_next_key
,
258 .map_lookup_elem
= fd_array_map_lookup_elem
,
259 .map_update_elem
= fd_array_map_update_elem
,
260 .map_delete_elem
= fd_array_map_delete_elem
,
261 .map_fd_get_ptr
= prog_fd_array_get_ptr
,
262 .map_fd_put_ptr
= prog_fd_array_put_ptr
,
265 static struct bpf_map_type_list prog_array_type __read_mostly
= {
266 .ops
= &prog_array_ops
,
267 .type
= BPF_MAP_TYPE_PROG_ARRAY
,
270 static int __init
register_prog_array_map(void)
272 bpf_register_map_type(&prog_array_type
);
275 late_initcall(register_prog_array_map
);
277 static void perf_event_array_map_free(struct bpf_map
*map
)
279 bpf_fd_array_map_clear(map
);
280 fd_array_map_free(map
);
283 static void *perf_event_fd_array_get_ptr(struct bpf_map
*map
, int fd
)
285 struct perf_event
*event
;
286 const struct perf_event_attr
*attr
;
288 event
= perf_event_get(fd
);
292 attr
= perf_event_attrs(event
);
296 if (attr
->type
!= PERF_TYPE_RAW
&&
297 attr
->type
!= PERF_TYPE_HARDWARE
) {
298 perf_event_release_kernel(event
);
299 return ERR_PTR(-EINVAL
);
304 static void perf_event_fd_array_put_ptr(void *ptr
)
306 struct perf_event
*event
= ptr
;
308 perf_event_release_kernel(event
);
311 static const struct bpf_map_ops perf_event_array_ops
= {
312 .map_alloc
= fd_array_map_alloc
,
313 .map_free
= perf_event_array_map_free
,
314 .map_get_next_key
= array_map_get_next_key
,
315 .map_lookup_elem
= fd_array_map_lookup_elem
,
316 .map_update_elem
= fd_array_map_update_elem
,
317 .map_delete_elem
= fd_array_map_delete_elem
,
318 .map_fd_get_ptr
= perf_event_fd_array_get_ptr
,
319 .map_fd_put_ptr
= perf_event_fd_array_put_ptr
,
322 static struct bpf_map_type_list perf_event_array_type __read_mostly
= {
323 .ops
= &perf_event_array_ops
,
324 .type
= BPF_MAP_TYPE_PERF_EVENT_ARRAY
,
327 static int __init
register_perf_event_array_map(void)
329 bpf_register_map_type(&perf_event_array_type
);
332 late_initcall(register_perf_event_array_map
);