// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Daniel Borkmann <daniel@iogearbox.net>
 */
#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
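/* For context, a minimal userspace sketch (not part of this file) of how the
 * pinning interface implemented below is reached through the bpf(2) syscall.
 * The helper names, the object fd and the "/sys/fs/bpf/my_obj" style path are
 * illustrative only and assume a bpffs instance is already mounted.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int obj_pin(int fd, const char *path)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.bpf_fd = fd;
 *		attr.pathname = (unsigned long)path;
 *		return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *	}
 *
 *	static int obj_get(const char *path)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.pathname = (unsigned long)path;
 *		return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 *	}
 */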
enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};

static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	/* Try the fd as a map, then a program, then a link. */
	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}
static const struct inode_operations bpf_dir_iops;

/* The prog/map/link inode_operations are intentionally empty; they only act
 * as type markers that bpf_inode_type() compares i_op against.
 */
static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(inode, dir, mode);

	return inode;
}
static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}
static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}
static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}
static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}
static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}
static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}
static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};
static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}
static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}
/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map. The purpose is to
 * provide a simple user intuitive way to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * the userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};
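/* Illustrative shell usage of the read-only view above; the mount point and
 * the pinned map name are examples, not something this file mandates:
 *
 *	# mount -t bpf bpf /sys/fs/bpf
 *	# bpftool map pin id 42 /sys/fs/bpf/my_map
 *	# cat /sys/fs/bpf/my_map
 *
 * For anything beyond this debug dump, go through bpf(2) (BPF_OBJ_GET plus
 * the map lookup/update commands) or a tool such as "bpftool map dump pinned".
 */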
static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}
static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}
static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);
}
static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions.
	 */
	if (strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}
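/* Consequently, creating e.g. "/sys/fs/bpf/foo.bar" (illustrative name) is
 * rejected with -EPERM, while "/sys/fs/bpf/foo_bar" is accepted.
 */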
static int bpf_symlink(struct inode *dir, struct dentry *dentry,
		       const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};
static int bpf_obj_do_pin(const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	/* Pinning is only allowed inside a bpffs directory. */
	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}
int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}
static void *bpf_obj_do_get(const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = inode_permission(inode, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}
int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}
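/* The flags argument above originates from the file_flags field of the
 * BPF_OBJ_GET attribute and is translated by bpf_get_file_flag(). A userspace
 * sketch for re-opening a pinned object read-only (path is illustrative):
 *
 *	union bpf_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (unsigned long)"/sys/fs/bpf/my_map";
 *	attr.file_flags = BPF_F_RDONLY;
 *	fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */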
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(inode, MAY_READ);

	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);

	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
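/* Sketch of an in-kernel caller of the exported helper above; the path, the
 * program type and use_the_prog() are made up for illustration:
 *
 *	struct bpf_prog *prog;
 *
 *	prog = bpf_prog_get_type_path("/sys/fs/bpf/flow_dissector",
 *				      BPF_PROG_TYPE_FLOW_DISSECTOR);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	use_the_prog(prog);
 *	bpf_prog_put(prog);
 */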
/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}
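/* With the default 0777 root mode nothing extra is printed; a bpffs mounted
 * with "-o mode=0750" would instead carry a ",mode=750" option in its
 * /proc/mounts entry (the exact flag list depends on the mount).
 */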
static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}
static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};
enum {
	OPT_MODE,
};

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode", OPT_MODE),
	{}
};

struct bpf_mount_opts {
	umode_t mode;
};
static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
	if (opt < 0) {
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		return opt == -ENOPARAM ? 0 : opt;
	}

	switch (opt) {
	case OPT_MODE:
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	}

	return 0;
}
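/* Hence the only option bpffs honours is the octal "mode" of the mount root;
 * unknown option names are accepted and ignored. Typical (illustrative) usage:
 *
 *	# mount -t bpf -o mode=0700 bpf /sys/fs/bpf
 */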
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	/* The root directory gets the sticky bit plus the requested mode. */
	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	inode->i_mode |= S_ISVTX | opts->mode;

	return 0;
}
static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}
static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}
static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};
/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
{
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	opts->mode = S_IRWXUGO;

	fc->fs_private = opts;
	fc->ops = &bpf_context_ops;
	return 0;
}
static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};
static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);