kernel/bpf/syscall.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
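
/* A map implementation registers itself at boot time. A minimal sketch of
 * the pattern, modeled on what kernel/bpf/arraymap.c does (the names here
 * are illustrative, not a verbatim copy):
 *
 *	static struct bpf_map_type_list array_type __read_mostly = {
 *		.ops = &array_ops,
 *		.type = BPF_MAP_TYPE_ARRAY,
 *	};
 *
 *	static int __init register_array_map(void)
 *	{
 *		bpf_register_map_type(&array_type);
 *		return 0;
 *	}
 *	late_initcall(register_array_map);
 */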
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	/* implementation dependent freeing */
	map->ops->map_free(map);
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
		/* prog_array stores refcnt-ed bpf_prog pointers;
		 * release them all when user space closes prog_array_fd
		 */
		bpf_prog_array_map_clear(map);

	bpf_map_put(map);
	return 0;
}

static const struct file_operations bpf_map_fops = {
	.release = bpf_map_release,
};
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
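
/* As an illustration, with BPF_MAP_CREATE_LAST_FIELD defined as
 * max_entries below, CHECK_ATTR(BPF_MAP_CREATE) expands to roughly:
 *
 *	memchr_inv((void *) &attr->max_entries +
 *		   sizeof(attr->max_entries), 0,
 *		   sizeof(*attr) -
 *		   offsetof(union bpf_attr, max_entries) -
 *		   sizeof(attr->max_entries)) != NULL
 *
 * i.e. every byte of the union past the last field this command knows
 * about must be zero, otherwise the attr came from a newer or confused
 * user space and the command is rejected.
 */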
#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);

	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	map->ops->map_free(map);
	return err;
}
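
/* From user space, the path above is reached through the bpf(2) syscall.
 * A minimal sketch, assuming <linux/bpf.h> and a libc exposing __NR_bpf
 * (illustrative; error handling omitted):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success map_fd is a new O_CLOEXEC file descriptor backed by
 * bpf_map_fops; closing it drops the reference taken here.
 */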
/* If an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput()
 */
struct bpf_map *bpf_map_get(struct fd f)
{
	struct bpf_map *map;

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	map = f.file->private_data;

	return map;
}
/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
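
/* The inverse cast happens in user space, which stores a pointer in an
 * __aligned_u64 field as, e.g. (illustrative):
 *
 *	attr.key = (__u64) (unsigned long) &key;
 *
 * which is why the u64 -> (unsigned long) -> pointer round trip here is
 * safe on both 32-bit and 64-bit kernels.
 */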
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value, *ptr;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER);
	if (!value)
		goto free_key;

	rcu_read_lock();
	ptr = map->ops->map_lookup_elem(map, key);
	if (ptr)
		memcpy(value, ptr, map->value_size);
	rcu_read_unlock();

	err = -ENOENT;
	if (!ptr)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, map->value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, map->value_size) != 0)
		goto free_value;

	/* eBPF programs that use maps run under rcu_read_lock();
	 * all map accessors rely on this fact, so do the same here
	 */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, attr->flags);
	rcu_read_unlock();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *next_key;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as a different opcode
				 * to avoid a conditional branch in the
				 * interpreter for every normal call
				 * and to prevent accidental JITing by a
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
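
/* After this rewrite, the interpreter can dispatch a BPF_CALL without a
 * lookup table; a sketch of the idea (see the interpreter in
 * kernel/bpf/core.c for the real thing):
 *
 *	func = (void *) __bpf_call_base + insn->imm;
 *	BPF_R0 = func(BPF_R1, BPF_R2, BPF_R3, BPF_R4, BPF_R5);
 */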
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
static void __prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		prog->aux->prog = prog;
		call_rcu(&prog->aux->rcu, __prog_put_rcu);
	}
}
void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		free_used_maps(prog->aux);
		bpf_prog_free(prog);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};
static struct bpf_prog *get_prog(struct fd f)
{
	struct bpf_prog *prog;

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	prog = f.file->private_data;

	return prog;
}
/* called by sockets/tracing/seccomp before attaching program to an event;
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = get_prog(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
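
/* A sketch of the typical caller pattern (the attach points live in their
 * own subsystems, e.g. sk_attach_bpf() in net/core/filter.c for
 * SO_ATTACH_BPF):
 *
 *	prog = bpf_prog_get(ufd);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	// ... attach prog to the event ...
 *	// and on detach:
 *	bpf_prog_put(prog);
 */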
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = false;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_free(prog);
	return err;
}
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	/* the syscall is limited to root temporarily. This restriction will be
	 * lifted when the security audit is clean. Note that eBPF+tracing must
	 * keep this restriction, since it may pass kernel data to user space
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
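
/* End-to-end sketch of the element commands from user space, assuming
 * map_fd was obtained via BPF_MAP_CREATE as in the earlier example
 * (illustrative; no error handling):
 *
 *	__u32 key = 1;
 *	__u64 value = 42, out;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64) (unsigned long) &key,
 *		.value  = (__u64) (unsigned long) &value,
 *		.flags  = BPF_ANY,
 *	};
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *
 *	attr.value = (__u64) (unsigned long) &out;
 *	syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	// out == 42 if key 1 was present
 */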