/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
};
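
/* Take or drop a reference on a pinned object, dispatching on whether the
 * raw pointer is an eBPF program or a map. bpf_any_get() returns the
 * refcounted object (or an ERR_PTR), bpf_any_put() releases it again,
 * including the user-visible uref for maps.
 */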

static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		raw = bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		raw = bpf_map_inc(raw, true);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
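
/* Resolve a user-supplied fd to either a map or a program. A map
 * reference (with uref) is tried first; if that fails, the fd is treated
 * as a program fd. *type is set accordingly.
 */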

static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	*type = BPF_TYPE_MAP;
	raw = bpf_map_get_with_uref(ufd);
	if (IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		raw = bpf_prog_get(ufd);
	}

	return raw;
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
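
/* Allocate a fresh inode for the bpf filesystem. Only directories, regular
 * files and symlinks are allowed; ownership and timestamps are initialized
 * from the parent directory and the requested mode.
 */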

static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(inode, dir, mode);

	return inode;
}
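
/* Map an inode back to the kind of object pinned in it, based on which
 * inode_operations it carries. Inodes that do not hold a bpf object are
 * rejected.
 */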

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else
		return -EACCES;

	return 0;
}
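
/* Attach a freshly created inode to its dentry and update the parent
 * directory's timestamps.
 */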

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}
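
/* Directory entry creation: bpf_mkdir() backs mkdir(2) inside the mount,
 * while bpf_mkobj_ops()/bpf_mkobj() implement the mknod path that object
 * pinning uses internally (see bpf_obj_do_pin() below).
 */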

static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkobj_ops(struct inode *dir, struct dentry *dentry,
			 umode_t mode, const struct inode_operations *iops)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_private = dentry->d_fsdata;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkobj(struct inode *dir, struct dentry *dentry, umode_t mode,
		     dev_t devt)
{
	enum bpf_type type = MINOR(devt);

	if (MAJOR(devt) != UNNAMED_MAJOR || !S_ISREG(mode) ||
	    dentry->d_fsdata == NULL)
		return -EPERM;

	switch (type) {
	case BPF_TYPE_PROG:
		return bpf_mkobj_ops(dir, dentry, mode, &bpf_prog_iops);
	case BPF_TYPE_MAP:
		return bpf_mkobj_ops(dir, dentry, mode, &bpf_map_iops);
	default:
		return -EPERM;
	}
}
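
/* Reject dentry names containing a dot; otherwise fall back to the
 * generic simple_lookup().
 */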

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	if (strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}
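
/* Create a symlink inode; the target string is duplicated and owned by
 * the inode (freed again in bpf_evict_inode()).
 */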

static int bpf_symlink(struct inode *dir, struct dentry *dentry,
		       const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mknod		= bpf_mkobj,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};
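
/* Pin an object at a path inside a bpf mount: resolve the parent
 * directory, verify it belongs to bpffs, and create the backing inode
 * via vfs_mknod() with the raw object passed through d_fsdata.
 */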

static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	dev_t devt;
	int ret;

	dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	devt = MKDEV(UNNAMED_MAJOR, type);

	ret = security_path_mknod(&path, dentry, mode, devt);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	dentry->d_fsdata = raw;
	ret = vfs_mknod(dir, dentry, mode, devt);
	dentry->d_fsdata = NULL;
out:
	done_path_create(&path, dentry);
	return ret;
}
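
/* Entry point for the BPF_OBJ_PIN command of the bpf(2) syscall. From
 * userspace this is reached roughly as follows (illustrative sketch, not
 * part of this file):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_obj";
 *	attr.bpf_fd   = fd;	// program or map fd
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 * On success the object stays alive for as long as the pinned file exists.
 */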

int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	struct filename *pname;
	enum bpf_type type;
	void *raw;
	int ret;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	ret = bpf_obj_do_pin(pname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);
	if ((trace_bpf_obj_pin_prog_enabled() ||
	     trace_bpf_obj_pin_map_enabled()) && !ret) {
		if (type == BPF_TYPE_PROG)
			trace_bpf_obj_pin_prog(raw, ufd, pname);
		if (type == BPF_TYPE_MAP)
			trace_bpf_obj_pin_map(raw, ufd, pname);
	}
out:
	putname(pname);
	return ret;
}
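
/* Look up a pinned object by path: check write permission on the inode,
 * derive the object type from its inode_operations and take a reference
 * on the object stashed in i_private.
 */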

static void *bpf_obj_do_get(const struct filename *pathname,
			    enum bpf_type *type)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = kern_path(pathname->name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = inode_permission(inode, MAY_WRITE);
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}
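
/* Entry point for the BPF_OBJ_GET command of the bpf(2) syscall: install
 * a new fd for a previously pinned program or map. Illustrative userspace
 * sketch (not part of this file):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_obj";
 *	fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */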

int bpf_obj_get_user(const char __user *pathname)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	struct filename *pname;
	int ret = -ENOENT;
	void *raw;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_obj_do_get(pname, &type);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw);
	else
		goto out;

	if (ret < 0) {
		bpf_any_put(raw, type);
	} else if (trace_bpf_obj_get_prog_enabled() ||
		   trace_bpf_obj_get_map_enabled()) {
		if (type == BPF_TYPE_PROG)
			trace_bpf_obj_get_prog(raw, ret, pname);
		if (type == BPF_TYPE_MAP)
			trace_bpf_obj_get_map(raw, ret, pname);
	}
out:
	putname(pname);
	return ret;
}
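
/* Inode teardown: free the symlink target if any, and drop the reference
 * on the pinned object held in i_private.
 */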

static void bpf_evict_inode(struct inode *inode)
{
	enum bpf_type type;

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= generic_show_options,
	.evict_inode	= bpf_evict_inode,
};

enum {
	OPT_MODE,
	OPT_ERR,
};

static const match_table_t bpf_mount_tokens = {
	{ OPT_MODE, "mode=%o" },
	{ OPT_ERR, NULL },
};

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
{
	substring_t args[MAX_OPT_ARGS];
	int option, token;
	char *ptr;

	opts->mode = S_IRWXUGO;

	while ((ptr = strsep(&data, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, bpf_mount_tokens, args);
		switch (token) {
		case OPT_MODE:
			if (match_octal(&args[0], &option))
				return -EINVAL;
			opts->mode = option & S_IALLUGO;
			break;
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		}
	}

	return 0;
}
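
/* Fill the superblock for a new bpf mount: parse the "mode=" option and
 * apply it to the sticky root directory. Typical userspace usage
 * (illustrative, not part of this file):
 *
 *	mount -t bpf bpffs /sys/fs/bpf -o mode=0700
 */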

static int bpf_fill_super(struct super_block *sb, void *data, int silent)
{
	static struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts opts;
	struct inode *inode;
	int ret;

	save_mount_options(sb, data);

	ret = bpf_parse_options(data, &opts);
	if (ret)
		return ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	inode->i_mode |= S_ISVTX | opts.mode;

	return 0;
}

static struct dentry *bpf_mount(struct file_system_type *type, int flags,
				const char *dev_name, void *data)
{
	return mount_nodev(type, flags, data, bpf_fill_super);
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.mount		= bpf_mount,
	.kill_sb	= kill_litter_super,
};
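
/* Register the "bpf" filesystem and expose its canonical mount point under
 * sysfs at /sys/fs/bpf.
 */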

static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);