/* Linux 4.9.243 - kernel/bpf/syscall.c */

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

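/* Allocate the backing memory for a map: small areas come from kmalloc(),
 * anything above the costly allocation order falls back to __vmalloc().
 * The result is released with bpf_map_area_free() via kvfree().
 */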
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

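/* Check whether charging @pages more pages against the current user's
 * RLIMIT_MEMLOCK would exceed the limit, without actually charging them.
 */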
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

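/* Charge map->pages against the current user's RLIMIT_MEMLOCK and remember
 * the user in map->user so that bpf_map_uncharge_memlock() can undo it.
 */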
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

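/* Drop a user reference; when the last one goes away, prog_array maps are
 * cleared so they stop holding references to the programs stored in them.
 */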
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

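/* ->release() handler for map fds: let the map implementation clean up any
 * per-file state, then drop the references the file descriptor held.
 */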
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

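/* Install a new O_CLOEXEC file descriptor referring to @map; closing it ends
 * up in bpf_map_release() above.
 */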
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

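/* weak stub, overridden by the stack map implementation when it is built in */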
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

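/* BPF_MAP_LOOKUP_ELEM: copy the key from user space, look the element up
 * (using a per-CPU sized value buffer for per-CPU map types) and copy the
 * value back to user space.
 */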
static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

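/* BPF_MAP_UPDATE_ELEM: copy key and value from user space, then dispatch to
 * the per-CPU, fd-array or generic update path for this map type.
 */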
static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

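/* BPF_MAP_DELETE_ELEM: copy the key from user space and delete the matching
 * element, with the same recursion protection as map_update_elem().
 */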
static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

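/* BPF_MAP_GET_NEXT_KEY: return the key that follows the supplied key (or the
 * first key when none is given), letting user space iterate over the map.
 */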
static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		err = -ENOMEM;
		key = kmalloc(map->key_size, GFP_USER);
		if (!key)
			goto err_put;

		err = -EFAULT;
		if (copy_from_user(key, ukey, map->key_size) != 0)
			goto free_key;
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

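/* Charge prog->pages against the current user's RLIMIT_MEMLOCK, mirroring the
 * map accounting in bpf_map_charge_memlock() above.
 */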
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

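/* RCU callback that runs once existing RCU readers can no longer be using the
 * program: drop the map references it holds, undo the memlock charge and free
 * the program itself.
 */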
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

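/* take @i extra references on @prog, failing with -EBUSY once the total would
 * exceed BPF_MAX_REFCNT
 */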
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}

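/* resolve a program fd to its bpf_prog, optionally checking the expected
 * program type, and take a new reference on success
 */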
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

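/* BPF_PROG_LOAD: copy the instructions and license string from user space,
 * run the verifier, select the runtime (JIT or interpreter) and return a new
 * program fd.
 */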
static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

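/* BPF_OBJ_PIN / BPF_OBJ_GET: pin a map or program fd at a path in the BPF
 * filesystem, or reopen a previously pinned object from such a path.
 */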
static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}

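/* bpf(2) syscall entry point: validate and copy the attribute union from user
 * space, then dispatch on the command.
 */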
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}