kernel/bpf/syscall.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        struct bpf_map_type_list *tl;
        struct bpf_map *map;

        list_for_each_entry(tl, &bpf_map_types, list_node) {
                if (tl->type == attr->map_type) {
                        map = tl->ops->map_alloc(attr);
                        if (IS_ERR(map))
                                return map;
                        map->ops = tl->ops;
                        map->map_type = attr->map_type;
                        return map;
                }
        }

        return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
        list_add(&tl->list_node, &bpf_map_types);
}
void *bpf_map_area_alloc(size_t size)
{
        /* We definitely need __GFP_NORETRY, so OOM killer doesn't
         * trigger under memory pressure as we really just want to
         * fail instead.
         */
        const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
        void *area;

        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                area = kmalloc(size, GFP_USER | flags);
                if (area != NULL)
                        return area;
        }

        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
                         PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
        kvfree(area);
}
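
/* Illustrative sketch (not part of this file): a map implementation
 * pairs the two helpers above, so a large pre-allocation falls back
 * transparently from kmalloc() to vmalloc(). Roughly, with hypothetical
 * max_entries and elem_size:
 *
 *      void *area = bpf_map_area_alloc(max_entries * elem_size);
 *
 *      if (!area)
 *              return ERR_PTR(-ENOMEM);
 *      ...
 *      bpf_map_area_free(area);        // kvfree() picks the right path
 */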
int bpf_map_precharge_memlock(u32 pages)
{
        struct user_struct *user = get_current_user();
        unsigned long memlock_limit, cur;

        memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        cur = atomic_long_read(&user->locked_vm);
        free_uid(user);
        if (cur + pages > memlock_limit)
                return -EPERM;
        return 0;
}
static int bpf_map_charge_memlock(struct bpf_map *map)
{
        struct user_struct *user = get_current_user();
        unsigned long memlock_limit;

        memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        atomic_long_add(map->pages, &user->locked_vm);

        if (atomic_long_read(&user->locked_vm) > memlock_limit) {
                atomic_long_sub(map->pages, &user->locked_vm);
                free_uid(user);
                return -EPERM;
        }
        map->user = user;
        return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
        struct user_struct *user = map->user;

        atomic_long_sub(map->pages, &user->locked_vm);
        free_uid(user);
}
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
        struct bpf_map *map = container_of(work, struct bpf_map, work);

        bpf_map_uncharge_memlock(map);
        /* implementation dependent freeing */
        map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
        if (atomic_dec_and_test(&map->usercnt)) {
                if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
                        bpf_fd_array_map_clear(map);
        }
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
        if (atomic_dec_and_test(&map->refcnt)) {
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
        bpf_map_put_uref(map);
        bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_release)
                map->ops->map_release(map, filp);

        bpf_map_put_with_uref(map);
        return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_map *map = filp->private_data;
        const struct bpf_array *array;
        u32 owner_prog_type = 0;

        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
                array = container_of(map, struct bpf_array, map);
                owner_prog_type = array->owner_prog_type;
        }

        seq_printf(m,
                   "map_type:\t%u\n"
                   "key_size:\t%u\n"
                   "value_size:\t%u\n"
                   "max_entries:\t%u\n"
                   "map_flags:\t%#x\n"
                   "memlock:\t%llu\n",
                   map->map_type,
                   map->key_size,
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
                   map->pages * 1ULL << PAGE_SHIFT);

        if (owner_prog_type)
                seq_printf(m, "owner_prog_type:\t%u\n",
                           owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo = bpf_map_show_fdinfo,
#endif
        .release = bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
        return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
                                O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
        memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
                   sizeof(attr->CMD##_LAST_FIELD), 0, \
                   sizeof(*attr) - \
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL
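
/* For example: with BPF_MAP_CREATE_LAST_FIELD defined as map_flags just
 * below, CHECK_ATTR(BPF_MAP_CREATE) runs memchr_inv() over every byte of
 * 'union bpf_attr' that follows attr->map_flags, and evaluates to true
 * (i.e. the attr is rejected) if any of those trailing bytes is non-zero.
 */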
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
        struct bpf_map *map;
        int err;

        err = CHECK_ATTR(BPF_MAP_CREATE);
        if (err)
                return -EINVAL;

        /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
        map = find_and_alloc_map(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);

        atomic_set(&map->refcnt, 1);
        atomic_set(&map->usercnt, 1);

        err = bpf_map_charge_memlock(map);
        if (err)
                goto free_map_nouncharge;

        err = bpf_map_new_fd(map);
        if (err < 0)
                /* failed to allocate fd */
                goto free_map;

        return err;

free_map:
        bpf_map_uncharge_memlock(map);
free_map_nouncharge:
        map->ops->map_free(map);
        return err;
}
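
/* Illustrative user-space sketch (not part of this file; assumes a libc
 * without a bpf(2) wrapper, hence the raw syscall(2)):
 *
 *      union bpf_attr attr = {
 *              .map_type    = BPF_MAP_TYPE_HASH,
 *              .key_size    = sizeof(__u32),
 *              .value_size  = sizeof(__u64),
 *              .max_entries = 256,
 *      };
 *      int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success map_fd refers to a new O_RDWR | O_CLOEXEC anon-inode file.
 */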
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
        if (!f.file)
                return ERR_PTR(-EBADF);
        if (f.file->f_op != &bpf_map_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
        if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
                atomic_dec(&map->refcnt);
                return ERR_PTR(-EBUSY);
        }
        if (uref)
                atomic_inc(&map->usercnt);
        return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        map = bpf_map_inc(map, true);
        fdput(f);

        return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        return -ENOTSUPP;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value, *ptr;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
        else
                value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_copy(map, key, value);
        } else {
                rcu_read_lock();
                ptr = map->ops->map_lookup_elem(map, key);
                if (ptr)
                        memcpy(value, ptr, value_size);
                rcu_read_unlock();
                err = ptr ? 0 : -ENOENT;
        }

        if (err)
                goto free_value;

        err = -EFAULT;
        if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;

        err = 0;

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
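
/* Illustrative user-space counterpart (map_fd as in the hypothetical
 * sketch after map_create(); key/value pointers travel as u64 handles):
 *
 *      __u32 key = 1;
 *      __u64 value;
 *      union bpf_attr attr = {
 *              .map_fd = map_fd,
 *              .key    = (__u64)(unsigned long)&key,
 *              .value  = (__u64)(unsigned long)&value,
 *      };
 *      int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *      // err < 0 with errno == ENOENT when the key is absent
 *
 * Note that per-cpu map types need a value buffer of
 * round_up(value_size, 8) * num_possible_cpus() bytes, as computed above.
 */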
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
        else
                value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = -EFAULT;
        if (copy_from_user(value, uvalue, value_size) != 0)
                goto free_value;

        /* must increment bpf_prog_active to avoid kprobe+bpf triggering from
         * inside a bpf map update or delete; otherwise deadlocks are possible
         */
        preempt_disable();
        __this_cpu_inc(bpf_prog_active);
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_update(map, key, value, attr->flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_update(map, key, value, attr->flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
                   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
                   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
                rcu_read_lock();
                err = bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                   attr->flags);
                rcu_read_unlock();
        } else {
                rcu_read_lock();
                err = map->ops->map_update_elem(map, key, value, attr->flags);
                rcu_read_unlock();
        }
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
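
/* Illustrative user-space counterpart (same hypothetical map_fd, key and
 * value as in the lookup sketch above; flags pick the update policy):
 *
 *      attr.flags = BPF_ANY;   // or BPF_NOEXIST / BPF_EXIST
 *      syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */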
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        struct fd f;
        void *key;
        int err;

        if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        preempt_disable();
        __this_cpu_inc(bpf_prog_active);
        rcu_read_lock();
        err = map->ops->map_delete_elem(map, key);
        rcu_read_unlock();
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();

free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *unext_key = u64_to_user_ptr(attr->next_key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *next_key;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        err = -ENOMEM;
        next_key = kmalloc(map->key_size, GFP_USER);
        if (!next_key)
                goto free_key;

        rcu_read_lock();
        err = map->ops->map_get_next_key(map, key, next_key);
        rcu_read_unlock();
        if (err)
                goto free_next_key;

        err = -EFAULT;
        if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                goto free_next_key;

        err = 0;

free_next_key:
        kfree(next_key);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
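
/* Illustrative sketch: user space iterates a map by chaining
 * BPF_MAP_GET_NEXT_KEY calls; for the hash map, a key that is not in the
 * map yields the first key, and -ENOENT ends the walk:
 *
 *      __u32 key = -1, next_key;
 *      union bpf_attr attr = {
 *              .map_fd   = map_fd,     // hypothetical fd, see above
 *              .key      = (__u64)(unsigned long)&key,
 *              .next_key = (__u64)(unsigned long)&next_key,
 *      };
 *
 *      while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)))
 *              key = next_key;
 */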
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
        struct bpf_prog_type_list *tl;

        list_for_each_entry(tl, &bpf_prog_types, list_node) {
                if (tl->type == type) {
                        prog->aux->ops = tl->ops;
                        prog->type = type;
                        return 0;
                }
        }

        return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
        list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
        const struct bpf_func_proto *fn;
        int i;

        for (i = 0; i < prog->len; i++) {
                struct bpf_insn *insn = &prog->insnsi[i];

                if (insn->code == (BPF_JMP | BPF_CALL)) {
                        /* we reach here when the program has bpf_call
                         * instructions and it passed bpf_check(), which
                         * means ops->get_func_proto must have been
                         * supplied; check it
                         */
                        BUG_ON(!prog->aux->ops->get_func_proto);

                        if (insn->imm == BPF_FUNC_get_route_realm)
                                prog->dst_needed = 1;
                        if (insn->imm == BPF_FUNC_get_prandom_u32)
                                bpf_user_rnd_init_once();
                        if (insn->imm == BPF_FUNC_xdp_adjust_head)
                                prog->xdp_adjust_head = 1;
                        if (insn->imm == BPF_FUNC_tail_call) {
                                /* mark bpf_tail_call as a different opcode
                                 * to avoid a conditional branch in the
                                 * interpreter for every normal call
                                 * and to prevent accidental JITing by
                                 * a JIT compiler that doesn't support
                                 * bpf_tail_call yet
                                 */
                                insn->imm = 0;
                                insn->code |= BPF_X;
                                continue;
                        }

                        fn = prog->aux->ops->get_func_proto(insn->imm);
                        /* all functions that have a prototype and that the
                         * verifier allowed programs to call must be real
                         * in-kernel functions
                         */
                        BUG_ON(!fn->func);
                        insn->imm = fn->func - __bpf_call_base;
                }
        }
}
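
/* Illustrative before/after of the rewrite above: a verifier-approved
 * BPF_JMP | BPF_CALL instruction enters this function with insn->imm
 * holding an enum value such as BPF_FUNC_map_lookup_elem, and leaves
 * with insn->imm = bpf_map_lookup_elem - __bpf_call_base, so that the
 * interpreter can later dispatch with a plain __bpf_call_base + imm.
 */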
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
        int i;

        for (i = 0; i < aux->used_map_cnt; i++)
                bpf_map_put(aux->used_maps[i]);

        kfree(aux->used_maps);
}
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
        unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        unsigned long user_bufs;

        if (user) {
                user_bufs = atomic_long_add_return(pages, &user->locked_vm);
                if (user_bufs > memlock_limit) {
                        atomic_long_sub(pages, &user->locked_vm);
                        return -EPERM;
                }
        }

        return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
        if (user)
                atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
        struct user_struct *user = get_current_user();
        int ret;

        ret = __bpf_prog_charge(user, prog->pages);
        if (ret) {
                free_uid(user);
                return ret;
        }

        prog->aux->user = user;
        return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
        struct user_struct *user = prog->aux->user;

        __bpf_prog_uncharge(user, prog->pages);
        free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
        struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

        free_used_maps(aux);
        bpf_prog_uncharge_memlock(aux->prog);
        bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
        if (atomic_dec_and_test(&prog->aux->refcnt))
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
        struct bpf_prog *prog = filp->private_data;

        bpf_prog_put(prog);
        return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_prog *prog = filp->private_data;
        char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

        bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
        seq_printf(m,
                   "prog_type:\t%u\n"
                   "prog_jited:\t%u\n"
                   "prog_tag:\t%s\n"
                   "memlock:\t%llu\n",
                   prog->type,
                   prog->jited,
                   prog_tag,
                   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo = bpf_prog_show_fdinfo,
#endif
        .release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
        return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
                                O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
        if (!f.file)
                return ERR_PTR(-EBADF);
        if (f.file->f_op != &bpf_prog_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
        if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
                atomic_sub(i, &prog->aux->refcnt);
                return ERR_PTR(-EBUSY);
        }
        return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
        /* Only to be used for undoing previous bpf_prog_add() in some
         * error path. We still know that another entity in our call
         * path holds a reference to the program, thus atomic_sub() can
         * be safely used in such cases!
         */
        WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
        return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
        struct fd f = fdget(ufd);
        struct bpf_prog *prog;

        prog = ____bpf_prog_get(f);
        if (IS_ERR(prog))
                return prog;
        if (type && prog->type != *type) {
                prog = ERR_PTR(-EINVAL);
                goto out;
        }

        prog = bpf_prog_inc(prog);
out:
        fdput(f);
        return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
        return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
        return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
        enum bpf_prog_type type = attr->prog_type;
        struct bpf_prog *prog;
        int err;
        char license[128];
        bool is_gpl;

        if (CHECK_ATTR(BPF_PROG_LOAD))
                return -EINVAL;

        /* copy eBPF program license from user space */
        if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
                              sizeof(license) - 1) < 0)
                return -EFAULT;
        license[sizeof(license) - 1] = 0;

        /* eBPF programs must be GPL compatible to use GPL-ed functions */
        is_gpl = license_is_gpl_compatible(license);

        if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
                return -E2BIG;

        if (type == BPF_PROG_TYPE_KPROBE &&
            attr->kern_version != LINUX_VERSION_CODE)
                return -EINVAL;

        if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* plain bpf_prog allocation */
        prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
        if (!prog)
                return -ENOMEM;

        err = bpf_prog_charge_memlock(prog);
        if (err)
                goto free_prog_nouncharge;

        prog->len = attr->insn_cnt;

        err = -EFAULT;
        if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
                           bpf_prog_insn_size(prog)) != 0)
                goto free_prog;

        prog->orig_prog = NULL;
        prog->jited = 0;

        atomic_set(&prog->aux->refcnt, 1);
        prog->gpl_compatible = is_gpl ? 1 : 0;

        /* find program type: socket_filter vs tracing_filter */
        err = find_prog_type(type, prog);
        if (err < 0)
                goto free_prog;

        /* run eBPF verifier */
        err = bpf_check(&prog, attr);
        if (err < 0)
                goto free_used_maps;

        /* fixup BPF_CALL->imm field */
        fixup_bpf_calls(prog);

        /* eBPF program is ready to be JITed */
        prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;

        err = bpf_prog_new_fd(prog);
        if (err < 0)
                /* failed to allocate fd */
                goto free_used_maps;

        return err;

free_used_maps:
        free_used_maps(prog->aux);
free_prog:
        bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
        bpf_prog_free(prog);
        return err;
}
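
/* Illustrative user-space sketch: the smallest loadable program just sets
 * R0 and exits (for a socket filter, R0 is the number of bytes to keep):
 *
 *      struct bpf_insn insns[] = {
 *              { .code = BPF_ALU64 | BPF_MOV | BPF_K,          // r0 = 0
 *                .dst_reg = BPF_REG_0, .imm = 0 },
 *              { .code = BPF_JMP | BPF_EXIT },                 // return r0
 *      };
 *      union bpf_attr attr = {
 *              .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *              .insns     = (__u64)(unsigned long)insns,
 *              .insn_cnt  = 2,
 *              .license   = (__u64)(unsigned long)"GPL",
 *      };
 *      int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */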
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
        if (CHECK_ATTR(BPF_OBJ))
                return -EINVAL;

        return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
        if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
                return -EINVAL;

        return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
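
/* Illustrative sketch: pinning gives an fd a name under a bpffs mount
 * (conventionally /sys/fs/bpf) so the object outlives the process:
 *
 *      union bpf_attr attr = {
 *              .pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map",
 *              .bpf_fd   = map_fd,             // hypothetical map fd
 *      };
 *      syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 * A later BPF_OBJ_GET with the same pathname (and bpf_fd left zero)
 * returns a fresh fd for the pinned object.
 */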
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_type

static int bpf_prog_attach(const union bpf_attr *attr)
{
        struct bpf_prog *prog;
        struct cgroup *cgrp;
        enum bpf_prog_type ptype;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (CHECK_ATTR(BPF_PROG_ATTACH))
                return -EINVAL;

        switch (attr->attach_type) {
        case BPF_CGROUP_INET_INGRESS:
        case BPF_CGROUP_INET_EGRESS:
                ptype = BPF_PROG_TYPE_CGROUP_SKB;
                break;
        case BPF_CGROUP_INET_SOCK_CREATE:
                ptype = BPF_PROG_TYPE_CGROUP_SOCK;
                break;
        default:
                return -EINVAL;
        }

        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp)) {
                bpf_prog_put(prog);
                return PTR_ERR(cgrp);
        }

        cgroup_bpf_update(cgrp, prog, attr->attach_type);
        cgroup_put(cgrp);

        return 0;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
        struct cgroup *cgrp;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (CHECK_ATTR(BPF_PROG_DETACH))
                return -EINVAL;

        switch (attr->attach_type) {
        case BPF_CGROUP_INET_INGRESS:
        case BPF_CGROUP_INET_EGRESS:
        case BPF_CGROUP_INET_SOCK_CREATE:
                cgrp = cgroup_get_from_fd(attr->target_fd);
                if (IS_ERR(cgrp))
                        return PTR_ERR(cgrp);

                cgroup_bpf_update(cgrp, NULL, attr->attach_type);
                cgroup_put(cgrp);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
#endif /* CONFIG_CGROUP_BPF */
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
        union bpf_attr attr = {};
        int err;

        if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
                return -EPERM;

        if (!access_ok(VERIFY_READ, uattr, 1))
                return -EFAULT;

        if (size > PAGE_SIZE)   /* silly large */
                return -E2BIG;

        /* If we're handed a bigger struct than we know of,
         * ensure all the unknown bits are 0 - i.e. new
         * user-space does not rely on any kernel feature
         * extensions we don't know about yet.
         */
        if (size > sizeof(attr)) {
                unsigned char __user *addr;
                unsigned char __user *end;
                unsigned char val;

                addr = (void __user *)uattr + sizeof(attr);
                end  = (void __user *)uattr + size;

                for (; addr < end; addr++) {
                        err = get_user(val, addr);
                        if (err)
                                return err;
                        if (val)
                                return -E2BIG;
                }
                size = sizeof(attr);
        }

        /* copy attributes from user space, may be less than sizeof(bpf_attr) */
        if (copy_from_user(&attr, uattr, size) != 0)
                return -EFAULT;

        switch (cmd) {
        case BPF_MAP_CREATE:
                err = map_create(&attr);
                break;
        case BPF_MAP_LOOKUP_ELEM:
                err = map_lookup_elem(&attr);
                break;
        case BPF_MAP_UPDATE_ELEM:
                err = map_update_elem(&attr);
                break;
        case BPF_MAP_DELETE_ELEM:
                err = map_delete_elem(&attr);
                break;
        case BPF_MAP_GET_NEXT_KEY:
                err = map_get_next_key(&attr);
                break;
        case BPF_PROG_LOAD:
                err = bpf_prog_load(&attr);
                break;
        case BPF_OBJ_PIN:
                err = bpf_obj_pin(&attr);
                break;
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;

#ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
        case BPF_PROG_DETACH:
                err = bpf_prog_detach(&attr);
                break;
#endif

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
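
/* Illustrative note on the size handling above: a newer user space may
 * hand in a larger 'union bpf_attr' than this kernel knows about, as
 * long as every trailing unknown byte is zero, e.g. (hypothetical attr
 * and cmd):
 *
 *      char buf[sizeof(union bpf_attr) + 64] = {};     // zeroed tail
 *
 *      memcpy(buf, &attr, sizeof(attr));
 *      syscall(__NR_bpf, cmd, buf, sizeof(buf));       // still accepted
 *
 * Any non-zero byte past the fields this kernel knows yields -E2BIG.
 */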