/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Daniel Borkmann <daniel@iogearbox.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
};

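/* Take a reference on a pinned object before handing it out, and drop
 * it again once it is no longer needed; which helper pair is used
 * depends on whether the object is a program or a map.
 */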
static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		raw = bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		raw = bpf_map_inc(raw, true);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

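/* Resolve a user-supplied fd to the bpf object behind it: try it as a
 * map first and fall back to a program, reporting the detected kind
 * through @type.
 */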
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	*type = BPF_TYPE_MAP;
	raw = bpf_map_get_with_uref(ufd);
	if (IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		raw = bpf_prog_get(ufd);
	}

	return raw;
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };

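/* Allocate a fresh bpffs inode for a directory, regular file or
 * symlink, initialize its timestamps and ownership, and leave the
 * type-specific setup to the caller.
 */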
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(inode, dir, mode);

	return inode;
}

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else
		return -EACCES;

	return 0;
}

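/* Hook a freshly created inode up to its dentry and bump the parent
 * directory's timestamps, mirroring what a regular file system does on
 * object creation.
 */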
static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}

static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

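/* Everything below implements seq_file based read support for pinned
 * maps: a small iterator object keeps the current key between calls so
 * that "cat" on a pinned map can walk all elements.
 */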
struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

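/* seq_file callbacks: map_seq_start() resumes from the last key (or
 * emits the header token), map_seq_next() advances via the map's
 * ->map_get_next_key() callback, and map_seq_show() prints one element
 * through ->map_seq_show_elem().
 */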
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		return NULL;
	}

	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

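/* Open allocates the per-file iterator and attaches it to the seq_file;
 * release tears both down again.
 */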
static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}

/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map.  The purpose is to
 * provide a simple, intuitive way for users to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * the userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};

static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

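/* Common helper for pinning: create the backing inode, wire up the
 * given inode and file operations, stash the raw object pointer in
 * i_private and finalize the dentry.
 */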
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions.
	 */
	if (strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

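/* A bpffs symlink simply owns a kstrdup()'ed copy of its target string;
 * the memory is released again from bpf_evict_inode().
 */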
static int bpf_symlink(struct inode *dir, struct dentry *dentry,
		       const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

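/* Pinning path: create a new inode for the object below the given
 * path, refusing to place it anywhere other than on a bpffs mount.
 */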
static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}

int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	struct filename *pname;
	enum bpf_type type;
	void *raw;
	int ret;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	ret = bpf_obj_do_pin(pname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);
out:
	putname(pname);
	return ret;
}

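/* Lookup path: resolve the pinned path, check permissions against the
 * requested access mode, determine whether the inode holds a program
 * or a map and take a reference on it.
 */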
static void *bpf_obj_do_get(const struct filename *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = kern_path(pathname->name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = inode_permission(inode, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	struct filename *pname;
	int ret = -ENOENT;
	int f_flags;
	void *raw;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_obj_do_get(pname, &type, f_flags);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else
		goto out;

	if (ret < 0)
		bpf_any_put(raw, type);
out:
	putname(pname);
	return ret;
}

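/* In-kernel variant of object retrieval: given an inode in bpffs,
 * verify it holds a program of the expected type and return it with an
 * extra reference.
 */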
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(inode, MAY_READ | MAY_WRITE);

	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	return bpf_prog_inc(prog);
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);

	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);

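/* Eviction is where a pinned object finally drops the reference taken
 * at pin time, and where a symlink's target string is freed.
 */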
static void bpf_evict_inode(struct inode *inode)
{
	enum bpf_type type;

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
}

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.evict_inode	= bpf_evict_inode,
};

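/* The only mount option bpffs understands is "mode=", which sets the
 * permission bits of the root directory.
 */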
enum {
	OPT_MODE,
	OPT_ERR,
};

static const match_table_t bpf_mount_tokens = {
	{ OPT_MODE, "mode=%o" },
	{ OPT_ERR, NULL },
};

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
{
	substring_t args[MAX_OPT_ARGS];
	int option, token;
	char *ptr;

	opts->mode = S_IRWXUGO;

	while ((ptr = strsep(&data, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, bpf_mount_tokens, args);
		switch (token) {
		case OPT_MODE:
			if (match_octal(&args[0], &option))
				return -EINVAL;
			opts->mode = option & S_IALLUGO;
			break;
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore unknown options for bpf.
		 */
		}
	}

	return 0;
}

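/* Fill the superblock: parse the options, set up the libfs skeleton and
 * make the root directory sticky with the requested mode.
 */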
static int bpf_fill_super(struct super_block *sb, void *data, int silent)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts opts;
	struct inode *inode;
	int ret;

	ret = bpf_parse_options(data, &opts);
	if (ret)
		return ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	inode->i_mode |= S_ISVTX | opts.mode;

	return 0;
}

static struct dentry *bpf_mount(struct file_system_type *type, int flags,
				const char *dev_name, void *data)
{
	return mount_nodev(type, flags, data, bpf_fill_super);
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.mount		= bpf_mount,
	.kill_sb	= kill_litter_super,
};

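/* Register the file system and expose /sys/fs/bpf as its canonical
 * mount point; undo the sysfs mount point if registration fails.
 */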
static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);