/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}

	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}
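
/* Sizing note (illustrative arithmetic, assuming 4 KiB pages and the
 * default PAGE_ALLOC_COSTLY_ORDER of 3 from mmzone.h): the kmalloc
 * fast path above is tried for sizes up to PAGE_SIZE << 3 = 32 KiB.
 * Anything larger, or any kmalloc failure, falls back to __vmalloc,
 * which builds the mapping from non-contiguous pages and is therefore
 * far less likely to fail under memory fragmentation.
 */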
void bpf_map_area_free(void *area)
{
	kvfree(area);
}
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
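
/* Worked example (illustrative, assuming a common RLIMIT_MEMLOCK
 * default of 64 KiB and PAGE_SHIFT of 12): memlock_limit becomes
 * 65536 >> 12 = 16 pages, so a precharge of 16 pages only succeeds
 * while user->locked_vm is still zero.
 */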
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}
static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
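
/* Illustrative expansion: with BPF_MAP_CREATE_LAST_FIELD defined as
 * map_flags below, CHECK_ATTR(BPF_MAP_CREATE) scans the bytes of *attr
 * from just past attr->map_flags up to sizeof(*attr) and evaluates to
 * true (i.e. reject) if any trailing byte is non-zero - meaning user
 * space set fields this kernel does not know about for this command.
 */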
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
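
/* Example (userspace sketch, not compiled here): creating an array map
 * through the bpf(2) syscall. The field values are illustrative only.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success map_fd is a new file descriptor backed by bpf_map_fops;
 * closing it drops the user reference taken in map_create().
 */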
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
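
/* Example (userspace sketch): because bpf_attr carries pointers as
 * __aligned_u64, user space must widen them explicitly before the
 * syscall, and the kernel narrows them back via u64_to_ptr():
 *
 *	__u32 key = 0;
 *	attr.key = (__u64)(unsigned long)&key;
 *
 * This keeps the ABI identical for 32-bit and 64-bit user space.
 */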
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
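
/* Example (userspace sketch) for BPF_MAP_LOOKUP_ELEM, with map_fd from
 * the map-creation sketch above. As implemented here, per-CPU map types
 * return one slot per possible CPU, each rounded up to 8 bytes, so for
 * those the user buffer must hold round_up(value_size, 8) *
 * num_possible_cpus() bytes; an ordinary map needs one value_size slot:
 *
 *	__u32 key = 0;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */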
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
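
/* Why the bpf_prog_active guard above matters, by example: a kprobe
 * placed inside the map implementation's update path could fire a
 * tracing BPF program that itself updates the same map, recursing into
 * locks already held on this CPU. The tracing entry points check the
 * per-cpu counter and skip nested program invocations while it is
 * raised.
 */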
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
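
/* Example (userspace sketch): walking every key in a map with
 * BPF_MAP_GET_NEXT_KEY. The loop ends when the syscall fails with
 * ENOENT, i.e. cur was the last key. Real code typically starts from a
 * sentinel key believed not to exist, since whether a missing cur
 * yields the first key is map-type dependent:
 *
 *	__u32 cur = (__u32)-1, next;
 *	union bpf_attr attr = {
 *		.map_fd   = map_fd,
 *		.key      = (__u64)(unsigned long)&cur,
 *		.next_key = (__u64)(unsigned long)&next,
 *	};
 *
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *		       sizeof(attr)) == 0)
 *		cur = next;
 */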
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}
static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}
void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};
int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
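
/* Example (userspace sketch): loading the smallest valid program, two
 * instructions that set R0 = 0 and exit. BPF_MOV64_IMM/BPF_EXIT_INSN
 * are the instruction macros from linux/filter.h (or a local copy in
 * user space):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * SOCKET_FILTER is used because, per the capability check above, it is
 * the one type an unprivileged caller may load.
 */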
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}
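
/* Example (userspace sketch): pinning an object and reopening it later.
 * The path must live on a mounted bpf filesystem, conventionally
 * /sys/fs/bpf; the name "my_map" is illustrative:
 *
 *	union bpf_attr attr = {
 *		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map",
 *		.bpf_fd   = map_fd,
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	attr.bpf_fd = 0;	must be zero for BPF_OBJ_GET, see above
 *	int fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */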
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}