// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 */
#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include "preload/bpf_preload.h"
32 static void *bpf_any_get(void *raw
, enum bpf_type type
)
39 bpf_map_inc_with_uref(raw
);
52 static void bpf_any_put(void *raw
, enum bpf_type type
)
59 bpf_map_put_with_uref(raw
);
70 static void *bpf_fd_probe_obj(u32 ufd
, enum bpf_type
*type
)
74 raw
= bpf_map_get_with_uref(ufd
);
80 raw
= bpf_prog_get(ufd
);
82 *type
= BPF_TYPE_PROG
;
86 raw
= bpf_link_get_from_fd(ufd
);
88 *type
= BPF_TYPE_LINK
;
92 return ERR_PTR(-EINVAL
);
95 static const struct inode_operations bpf_dir_iops
;
97 static const struct inode_operations bpf_prog_iops
= { };
98 static const struct inode_operations bpf_map_iops
= { };
99 static const struct inode_operations bpf_link_iops
= { };
101 static struct inode
*bpf_get_inode(struct super_block
*sb
,
102 const struct inode
*dir
,
107 switch (mode
& S_IFMT
) {
113 return ERR_PTR(-EINVAL
);
116 inode
= new_inode(sb
);
118 return ERR_PTR(-ENOSPC
);
120 inode
->i_ino
= get_next_ino();
121 inode
->i_atime
= current_time(inode
);
122 inode
->i_mtime
= inode
->i_atime
;
123 inode
->i_ctime
= inode
->i_atime
;
125 inode_init_owner(inode
, dir
, mode
);
130 static int bpf_inode_type(const struct inode
*inode
, enum bpf_type
*type
)
132 *type
= BPF_TYPE_UNSPEC
;
133 if (inode
->i_op
== &bpf_prog_iops
)
134 *type
= BPF_TYPE_PROG
;
135 else if (inode
->i_op
== &bpf_map_iops
)
136 *type
= BPF_TYPE_MAP
;
137 else if (inode
->i_op
== &bpf_link_iops
)
138 *type
= BPF_TYPE_LINK
;
145 static void bpf_dentry_finalize(struct dentry
*dentry
, struct inode
*inode
,
148 d_instantiate(dentry
, inode
);
151 dir
->i_mtime
= current_time(dir
);
152 dir
->i_ctime
= dir
->i_mtime
;
155 static int bpf_mkdir(struct inode
*dir
, struct dentry
*dentry
, umode_t mode
)
159 inode
= bpf_get_inode(dir
->i_sb
, dir
, mode
| S_IFDIR
);
161 return PTR_ERR(inode
);
163 inode
->i_op
= &bpf_dir_iops
;
164 inode
->i_fop
= &simple_dir_operations
;
169 bpf_dentry_finalize(dentry
, inode
, dir
);
178 static struct map_iter
*map_iter(struct seq_file
*m
)
183 static struct bpf_map
*seq_file_to_map(struct seq_file
*m
)
185 return file_inode(m
->file
)->i_private
;
188 static void map_iter_free(struct map_iter
*iter
)
196 static struct map_iter
*map_iter_alloc(struct bpf_map
*map
)
198 struct map_iter
*iter
;
200 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
| __GFP_NOWARN
);
204 iter
->key
= kzalloc(map
->key_size
, GFP_KERNEL
| __GFP_NOWARN
);
215 static void *map_seq_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
217 struct bpf_map
*map
= seq_file_to_map(m
);
218 void *key
= map_iter(m
)->key
;
222 if (map_iter(m
)->done
)
225 if (unlikely(v
== SEQ_START_TOKEN
))
231 if (map
->ops
->map_get_next_key(map
, prev_key
, key
)) {
232 map_iter(m
)->done
= true;
239 static void *map_seq_start(struct seq_file
*m
, loff_t
*pos
)
241 if (map_iter(m
)->done
)
244 return *pos
? map_iter(m
)->key
: SEQ_START_TOKEN
;
/* seq_file stop: nothing to release, state lives in map_iter(). */
static void map_seq_stop(struct seq_file *m, void *v)
{
}
251 static int map_seq_show(struct seq_file
*m
, void *v
)
253 struct bpf_map
*map
= seq_file_to_map(m
);
254 void *key
= map_iter(m
)->key
;
256 if (unlikely(v
== SEQ_START_TOKEN
)) {
257 seq_puts(m
, "# WARNING!! The output is for debug purpose only\n");
258 seq_puts(m
, "# WARNING!! The output format will change\n");
260 map
->ops
->map_seq_show_elem(map
, key
, m
);
266 static const struct seq_operations bpffs_map_seq_ops
= {
267 .start
= map_seq_start
,
268 .next
= map_seq_next
,
269 .show
= map_seq_show
,
270 .stop
= map_seq_stop
,
273 static int bpffs_map_open(struct inode
*inode
, struct file
*file
)
275 struct bpf_map
*map
= inode
->i_private
;
276 struct map_iter
*iter
;
280 iter
= map_iter_alloc(map
);
284 err
= seq_open(file
, &bpffs_map_seq_ops
);
290 m
= file
->private_data
;
296 static int bpffs_map_release(struct inode
*inode
, struct file
*file
)
298 struct seq_file
*m
= file
->private_data
;
300 map_iter_free(map_iter(m
));
302 return seq_release(inode
, file
);
305 /* bpffs_map_fops should only implement the basic
306 * read operation for a BPF map. The purpose is to
307 * provide a simple user intuitive way to do
308 * "cat bpffs/pathto/a-pinned-map".
310 * Other operations (e.g. write, lookup...) should be realized by
311 * the userspace tools (e.g. bpftool) through the
312 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
315 static const struct file_operations bpffs_map_fops
= {
316 .open
= bpffs_map_open
,
318 .release
= bpffs_map_release
,
321 static int bpffs_obj_open(struct inode
*inode
, struct file
*file
)
326 static const struct file_operations bpffs_obj_fops
= {
327 .open
= bpffs_obj_open
,
330 static int bpf_mkobj_ops(struct dentry
*dentry
, umode_t mode
, void *raw
,
331 const struct inode_operations
*iops
,
332 const struct file_operations
*fops
)
334 struct inode
*dir
= dentry
->d_parent
->d_inode
;
335 struct inode
*inode
= bpf_get_inode(dir
->i_sb
, dir
, mode
);
337 return PTR_ERR(inode
);
341 inode
->i_private
= raw
;
343 bpf_dentry_finalize(dentry
, inode
, dir
);
347 static int bpf_mkprog(struct dentry
*dentry
, umode_t mode
, void *arg
)
349 return bpf_mkobj_ops(dentry
, mode
, arg
, &bpf_prog_iops
,
353 static int bpf_mkmap(struct dentry
*dentry
, umode_t mode
, void *arg
)
355 struct bpf_map
*map
= arg
;
357 return bpf_mkobj_ops(dentry
, mode
, arg
, &bpf_map_iops
,
358 bpf_map_support_seq_show(map
) ?
359 &bpffs_map_fops
: &bpffs_obj_fops
);
362 static int bpf_mklink(struct dentry
*dentry
, umode_t mode
, void *arg
)
364 struct bpf_link
*link
= arg
;
366 return bpf_mkobj_ops(dentry
, mode
, arg
, &bpf_link_iops
,
367 bpf_link_is_iter(link
) ?
368 &bpf_iter_fops
: &bpffs_obj_fops
);
371 static struct dentry
*
372 bpf_lookup(struct inode
*dir
, struct dentry
*dentry
, unsigned flags
)
374 /* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
375 * extensions. That allows popoulate_bpffs() create special files.
377 if ((dir
->i_mode
& S_IALLUGO
) &&
378 strchr(dentry
->d_name
.name
, '.'))
379 return ERR_PTR(-EPERM
);
381 return simple_lookup(dir
, dentry
, flags
);
384 static int bpf_symlink(struct inode
*dir
, struct dentry
*dentry
,
387 char *link
= kstrdup(target
, GFP_USER
| __GFP_NOWARN
);
393 inode
= bpf_get_inode(dir
->i_sb
, dir
, S_IRWXUGO
| S_IFLNK
);
396 return PTR_ERR(inode
);
399 inode
->i_op
= &simple_symlink_inode_operations
;
400 inode
->i_link
= link
;
402 bpf_dentry_finalize(dentry
, inode
, dir
);
406 static const struct inode_operations bpf_dir_iops
= {
407 .lookup
= bpf_lookup
,
409 .symlink
= bpf_symlink
,
410 .rmdir
= simple_rmdir
,
411 .rename
= simple_rename
,
413 .unlink
= simple_unlink
,
416 /* pin iterator link into bpffs */
417 static int bpf_iter_link_pin_kernel(struct dentry
*parent
,
418 const char *name
, struct bpf_link
*link
)
420 umode_t mode
= S_IFREG
| S_IRUSR
;
421 struct dentry
*dentry
;
424 inode_lock(parent
->d_inode
);
425 dentry
= lookup_one_len(name
, parent
, strlen(name
));
426 if (IS_ERR(dentry
)) {
427 inode_unlock(parent
->d_inode
);
428 return PTR_ERR(dentry
);
430 ret
= bpf_mkobj_ops(dentry
, mode
, link
, &bpf_link_iops
,
433 inode_unlock(parent
->d_inode
);
437 static int bpf_obj_do_pin(const char __user
*pathname
, void *raw
,
440 struct dentry
*dentry
;
446 dentry
= user_path_create(AT_FDCWD
, pathname
, &path
, 0);
448 return PTR_ERR(dentry
);
450 mode
= S_IFREG
| ((S_IRUSR
| S_IWUSR
) & ~current_umask());
452 ret
= security_path_mknod(&path
, dentry
, mode
, 0);
456 dir
= d_inode(path
.dentry
);
457 if (dir
->i_op
!= &bpf_dir_iops
) {
464 ret
= vfs_mkobj(dentry
, mode
, bpf_mkprog
, raw
);
467 ret
= vfs_mkobj(dentry
, mode
, bpf_mkmap
, raw
);
470 ret
= vfs_mkobj(dentry
, mode
, bpf_mklink
, raw
);
476 done_path_create(&path
, dentry
);
480 int bpf_obj_pin_user(u32 ufd
, const char __user
*pathname
)
486 raw
= bpf_fd_probe_obj(ufd
, &type
);
490 ret
= bpf_obj_do_pin(pathname
, raw
, type
);
492 bpf_any_put(raw
, type
);
497 static void *bpf_obj_do_get(const char __user
*pathname
,
498 enum bpf_type
*type
, int flags
)
505 ret
= user_path_at(AT_FDCWD
, pathname
, LOOKUP_FOLLOW
, &path
);
509 inode
= d_backing_inode(path
.dentry
);
510 ret
= inode_permission(inode
, ACC_MODE(flags
));
514 ret
= bpf_inode_type(inode
, type
);
518 raw
= bpf_any_get(inode
->i_private
, *type
);
529 int bpf_obj_get_user(const char __user
*pathname
, int flags
)
531 enum bpf_type type
= BPF_TYPE_UNSPEC
;
536 f_flags
= bpf_get_file_flag(flags
);
540 raw
= bpf_obj_do_get(pathname
, &type
, f_flags
);
544 if (type
== BPF_TYPE_PROG
)
545 ret
= bpf_prog_new_fd(raw
);
546 else if (type
== BPF_TYPE_MAP
)
547 ret
= bpf_map_new_fd(raw
, f_flags
);
548 else if (type
== BPF_TYPE_LINK
)
549 ret
= bpf_link_new_fd(raw
);
554 bpf_any_put(raw
, type
);
558 static struct bpf_prog
*__get_prog_inode(struct inode
*inode
, enum bpf_prog_type type
)
560 struct bpf_prog
*prog
;
561 int ret
= inode_permission(inode
, MAY_READ
);
565 if (inode
->i_op
== &bpf_map_iops
)
566 return ERR_PTR(-EINVAL
);
567 if (inode
->i_op
== &bpf_link_iops
)
568 return ERR_PTR(-EINVAL
);
569 if (inode
->i_op
!= &bpf_prog_iops
)
570 return ERR_PTR(-EACCES
);
572 prog
= inode
->i_private
;
574 ret
= security_bpf_prog(prog
);
578 if (!bpf_prog_get_ok(prog
, &type
, false))
579 return ERR_PTR(-EINVAL
);
585 struct bpf_prog
*bpf_prog_get_type_path(const char *name
, enum bpf_prog_type type
)
587 struct bpf_prog
*prog
;
589 int ret
= kern_path(name
, LOOKUP_FOLLOW
, &path
);
592 prog
= __get_prog_inode(d_backing_inode(path
.dentry
), type
);
598 EXPORT_SYMBOL(bpf_prog_get_type_path
);
601 * Display the mount options in /proc/mounts.
603 static int bpf_show_options(struct seq_file
*m
, struct dentry
*root
)
605 umode_t mode
= d_inode(root
)->i_mode
& S_IALLUGO
& ~S_ISVTX
;
607 if (mode
!= S_IRWXUGO
)
608 seq_printf(m
, ",mode=%o", mode
);
612 static void bpf_free_inode(struct inode
*inode
)
616 if (S_ISLNK(inode
->i_mode
))
617 kfree(inode
->i_link
);
618 if (!bpf_inode_type(inode
, &type
))
619 bpf_any_put(inode
->i_private
, type
);
620 free_inode_nonrcu(inode
);
623 static const struct super_operations bpf_super_ops
= {
624 .statfs
= simple_statfs
,
625 .drop_inode
= generic_delete_inode
,
626 .show_options
= bpf_show_options
,
627 .free_inode
= bpf_free_inode
,
634 static const struct fs_parameter_spec bpf_fs_parameters
[] = {
635 fsparam_u32oct ("mode", OPT_MODE
),
639 struct bpf_mount_opts
{
643 static int bpf_parse_param(struct fs_context
*fc
, struct fs_parameter
*param
)
645 struct bpf_mount_opts
*opts
= fc
->fs_private
;
646 struct fs_parse_result result
;
649 opt
= fs_parse(fc
, bpf_fs_parameters
, param
, &result
);
651 /* We might like to report bad mount options here, but
652 * traditionally we've ignored all mount options, so we'd
653 * better continue to ignore non-existing options for bpf.
655 return opt
== -ENOPARAM
? 0 : opt
;
659 opts
->mode
= result
.uint_32
& S_IALLUGO
;
/* Populated by the bpf_preload module (built-in or .ko); NULL until
 * then.  Accesses are serialized by bpf_preload_lock.
 */
struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);
669 static bool bpf_preload_mod_get(void)
671 /* If bpf_preload.ko wasn't loaded earlier then load it now.
672 * When bpf_preload is built into vmlinux the module's __init
673 * function will populate it.
675 if (!bpf_preload_ops
) {
676 request_module("bpf_preload");
677 if (!bpf_preload_ops
)
680 /* And grab the reference, so the module doesn't disappear while the
681 * kernel is interacting with the kernel module and its UMD.
683 if (!try_module_get(bpf_preload_ops
->owner
)) {
684 pr_err("bpf_preload module get failed.\n");
690 static void bpf_preload_mod_put(void)
693 /* now user can "rmmod bpf_preload" if necessary */
694 module_put(bpf_preload_ops
->owner
);
/* Serializes all interactions with the bpf_preload module/UMD. */
static DEFINE_MUTEX(bpf_preload_lock);
699 static int populate_bpffs(struct dentry
*parent
)
701 struct bpf_preload_info objs
[BPF_PRELOAD_LINKS
] = {};
702 struct bpf_link
*links
[BPF_PRELOAD_LINKS
] = {};
705 /* grab the mutex to make sure the kernel interactions with bpf_preload
708 mutex_lock(&bpf_preload_lock
);
710 /* if bpf_preload.ko wasn't built into vmlinux then load it */
711 if (!bpf_preload_mod_get())
714 if (!bpf_preload_ops
->info
.tgid
) {
715 /* preload() will start UMD that will load BPF iterator programs */
716 err
= bpf_preload_ops
->preload(objs
);
719 for (i
= 0; i
< BPF_PRELOAD_LINKS
; i
++) {
720 links
[i
] = bpf_link_by_id(objs
[i
].link_id
);
721 if (IS_ERR(links
[i
])) {
722 err
= PTR_ERR(links
[i
]);
726 for (i
= 0; i
< BPF_PRELOAD_LINKS
; i
++) {
727 err
= bpf_iter_link_pin_kernel(parent
,
728 objs
[i
].link_name
, links
[i
]);
731 /* do not unlink successfully pinned links even
732 * if later link fails to pin
736 /* finish() will tell UMD process to exit */
737 err
= bpf_preload_ops
->finish();
742 bpf_preload_mod_put();
744 mutex_unlock(&bpf_preload_lock
);
745 for (i
= 0; i
< BPF_PRELOAD_LINKS
&& err
; i
++)
746 if (!IS_ERR_OR_NULL(links
[i
]))
747 bpf_link_put(links
[i
]);
751 static int bpf_fill_super(struct super_block
*sb
, struct fs_context
*fc
)
753 static const struct tree_descr bpf_rfiles
[] = { { "" } };
754 struct bpf_mount_opts
*opts
= fc
->fs_private
;
758 ret
= simple_fill_super(sb
, BPF_FS_MAGIC
, bpf_rfiles
);
762 sb
->s_op
= &bpf_super_ops
;
764 inode
= sb
->s_root
->d_inode
;
765 inode
->i_op
= &bpf_dir_iops
;
766 inode
->i_mode
&= ~S_IALLUGO
;
767 populate_bpffs(sb
->s_root
);
768 inode
->i_mode
|= S_ISVTX
| opts
->mode
;
772 static int bpf_get_tree(struct fs_context
*fc
)
774 return get_tree_nodev(fc
, bpf_fill_super
);
777 static void bpf_free_fc(struct fs_context
*fc
)
779 kfree(fc
->fs_private
);
782 static const struct fs_context_operations bpf_context_ops
= {
784 .parse_param
= bpf_parse_param
,
785 .get_tree
= bpf_get_tree
,
789 * Set up the filesystem mount context.
791 static int bpf_init_fs_context(struct fs_context
*fc
)
793 struct bpf_mount_opts
*opts
;
795 opts
= kzalloc(sizeof(struct bpf_mount_opts
), GFP_KERNEL
);
799 opts
->mode
= S_IRWXUGO
;
801 fc
->fs_private
= opts
;
802 fc
->ops
= &bpf_context_ops
;
806 static struct file_system_type bpf_fs_type
= {
807 .owner
= THIS_MODULE
,
809 .init_fs_context
= bpf_init_fs_context
,
810 .parameters
= bpf_fs_parameters
,
811 .kill_sb
= kill_litter_super
,
814 static int __init
bpf_init(void)
818 mutex_init(&bpf_preload_lock
);
820 ret
= sysfs_create_mount_point(fs_kobj
, "bpf");
824 ret
= register_filesystem(&bpf_fs_type
);
826 sysfs_remove_mount_point(fs_kobj
, "bpf");
830 fs_initcall(bpf_init
);