/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/magic.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");
static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);
static int set_global_limit(const char *val, const struct kernel_param *kp);
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");
unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");
#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
static struct file_system_type fuseblk_fs_type;
struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}
static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
{
	struct fuse_submount_lookup *sl;

	sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
	if (!sl)
		return NULL;
	sl->forget = fuse_alloc_forget();
	if (!sl->forget)
		goto out_free;

	return sl;

out_free:
	kfree(sl);
	return NULL;
}
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct fuse_inode *fi;

	fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;

	fi->i_time = 0;
	fi->inval_mask = ~0;
	fi->nodeid = 0;
	fi->nlookup = 0;
	fi->attr_version = 0;
	fi->orig_ino = 0;
	fi->state = 0;
	fi->submount_lookup = NULL;
	mutex_init(&fi->mutex);
	spin_lock_init(&fi->lock);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget)
		goto out_free;

	if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
		goto out_free_forget;

	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_inode_backing_set(fi, NULL);

	return &fi->inode;

out_free_forget:
	kfree(fi->forget);
out_free:
	kmem_cache_free(fuse_inode_cachep, fi);
	return NULL;
}
static void fuse_free_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	mutex_destroy(&fi->mutex);
	kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
	kfree(fi->dax);
#endif
	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_backing_put(fuse_inode_backing(fi));

	kmem_cache_free(fuse_inode_cachep, fi);
}
static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
					 struct fuse_submount_lookup *sl)
{
	if (!refcount_dec_and_test(&sl->count))
		return;

	fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
	sl->forget = NULL;
	kfree(sl);
}
static void fuse_evict_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Will write inode on close/munmap and in all other dirtiers */
	WARN_ON(inode->i_state & I_DIRTY_INODE);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (FUSE_IS_DAX(inode))
			fuse_dax_inode_cleanup(inode);
		if (fi->nlookup) {
			fuse_queue_forget(fc, fi->forget, fi->nodeid,
					  fi->nlookup);
			fi->forget = NULL;
		}

		if (fi->submount_lookup) {
			fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
			fi->submount_lookup = NULL;
		}
	}
	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
		WARN_ON(fi->iocachectr != 0);
		WARN_ON(!list_empty(&fi->write_files));
		WARN_ON(!list_empty(&fi->queued_writes));
	}
}
static int fuse_reconfigure(struct fs_context *fsc)
{
	struct super_block *sb = fsc->root->d_sb;

	sync_filesystem(sb);
	if (fsc->sb_flags & SB_MANDLOCK)
		return -EINVAL;

	return 0;
}
/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}
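/*
 * Worked example (illustrative, not part of the original source): on a
 * 32-bit arch with a 4-byte ino_t the shift is (8 - 4) * 8 = 32 bits, so
 * the high word is XORed into the low one:
 *
 *	ino64 = 0x0000000500000001
 *	ino   = 0x00000001 ^ 0x00000005 = 0x00000004
 *
 * On 64-bit arches the size test is false and ino64 is used unchanged.
 */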
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   struct fuse_statx *sx,
				   u64 attr_valid, u32 cache_mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	lockdep_assert_held(&fi->lock);

	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	fi->i_time = attr_valid;
	/* Clear basic stats from invalid mask */
	set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);

	inode->i_ino     = fuse_squash_ino(attr->ino);
	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid     = make_kuid(fc->user_ns, attr->uid);
	inode->i_gid     = make_kgid(fc->user_ns, attr->gid);
	inode->i_blocks  = attr->blocks;

	/* Sanitize nsecs */
	attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
	attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
	attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);

	inode_set_atime(inode, attr->atime, attr->atimensec);
	/* mtime from server may be stale due to local buffered write */
	if (!(cache_mask & STATX_MTIME)) {
		inode_set_mtime(inode, attr->mtime, attr->mtimensec);
	}
	if (!(cache_mask & STATX_CTIME)) {
		inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	}

	if (sx) {
		/* Sanitize nsecs */
		sx->btime.tv_nsec =
			min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);

		/*
		 * Btime has been queried, cache is valid (whether or not
		 * btime is available) so clear STATX_BTIME from inval_mask.
		 *
		 * Availability of the btime attribute is indicated in
		 * FUSE_I_BTIME.
		 */
		set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
		if (sx->mask & STATX_BTIME) {
			set_bit(FUSE_I_BTIME, &fi->state);
			fi->i_btime.tv_sec = sx->btime.tv_sec;
			fi->i_btime.tv_nsec = sx->btime.tv_nsec;
		}
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions. This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;

	/*
	 * We are refreshing inode data and it is possible that another
	 * client set suid/sgid or security.capability xattr. So clear
	 * S_NOSEC. Ideally, we could have cleared it only if suid/sgid
	 * was set or if security.capability xattr was set. But we don't
	 * know if security.capability has been set or not. So clear it
	 * anyway. It's less efficient but should be safe.
	 */
	inode->i_flags &= ~S_NOSEC;
}
u32 fuse_get_cache_mask(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
		return 0;

	return STATX_MTIME | STATX_CTIME | STATX_SIZE;
}
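/*
 * Illustrative example (not part of the original source): with
 * writeback_cache enabled on a regular file this returns
 * STATX_MTIME | STATX_CTIME | STATX_SIZE, so fuse_change_attributes()
 * below keeps the locally cached size and timestamps instead of the
 * possibly stale values supplied by the server.
 */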
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_statx *sx,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	u32 cache_mask;
	loff_t oldsize;
	struct timespec64 old_mtime;

	spin_lock(&fi->lock);
	/*
	 * When writeback_cache is enabled, writes update mtime, ctime and
	 * may update i_size. In these cases trust the cached value in the
	 * inode.
	 */
	cache_mask = fuse_get_cache_mask(inode);
	if (cache_mask & STATX_SIZE)
		attr->size = i_size_read(inode);

	if (cache_mask & STATX_MTIME) {
		attr->mtime = inode_get_mtime_sec(inode);
		attr->mtimensec = inode_get_mtime_nsec(inode);
	}
	if (cache_mask & STATX_CTIME) {
		attr->ctime = inode_get_ctime_sec(inode);
		attr->ctimensec = inode_get_ctime_nsec(inode);
	}

	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fi->lock);
		return;
	}

	old_mtime = inode_get_mtime(inode);
	fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);

	oldsize = inode->i_size;
	/*
	 * When writeback_cache is enabled, cached writes beyond EOF extend
	 * the local i_size without keeping the userspace server in sync. So,
	 * attr->size coming from the server can be stale. We cannot trust it.
	 */
	if (!(cache_mask & STATX_SIZE))
		i_size_write(inode, attr->size);
	spin_unlock(&fi->lock);

	if (!cache_mask && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			if (!fc->explicit_inval_data)
				inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec64 new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec64_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_dontcache(inode, attr->flags);
}
static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
				      u64 nodeid)
{
	sl->nodeid = nodeid;
	refcount_set(&sl->count, 1);
}
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_conn *fc)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode_set_mtime(inode, attr->mtime, attr->mtimensec);
	inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode, attr->flags);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	} else
		BUG();
	/*
	 * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
	 * so they see the exact same behavior as before.
	 */
	if (!fc->posix_acl)
		inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}
static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}
static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
	return 0;
}
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	/*
	 * Auto mount points get their node id from the submount root, which is
	 * not a unique identifier within this filesystem.
	 *
	 * To avoid conflicts, do not place submount points into the inode hash
	 * table.
	 */
	if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
	    S_ISDIR(attr->mode)) {
		struct fuse_inode *fi;

		inode = new_inode(sb);
		if (!inode)
			return NULL;

		fuse_init_inode(inode, attr, fc);
		fi = get_fuse_inode(inode);
		fi->nodeid = nodeid;
		fi->submount_lookup = fuse_alloc_submount_lookup();
		if (!fi->submount_lookup) {
			iput(inode);
			return NULL;
		}
		/* Sets nlookup = 1 on fi->submount_lookup->nlookup */
		fuse_init_submount_lookup(fi->submount_lookup, nodeid);
		inode->i_flags |= S_AUTOMOUNT;
		goto done;
	}

retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr, fc);
		unlock_new_inode(inode);
	} else if (fuse_stale_inode(inode, generation, attr)) {
		/* nodeid was reused, any I/O on the old inode should fail */
		fuse_make_bad(inode);
		if (inode != d_inode(sb->s_root)) {
			remove_inode_hash(inode);
			iput(inode);
			goto retry;
		}
	}
	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->nlookup++;
	spin_unlock(&fi->lock);
done:
	fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);

	return inode;
}
struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_mount **fm)
{
	struct fuse_mount *fm_iter;
	struct inode *inode;

	WARN_ON(!rwsem_is_locked(&fc->killsb));
	list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
		if (!fm_iter->sb)
			continue;

		inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			if (fm)
				*fm = fm_iter;
			return inode;
		}
	}

	return NULL;
}
int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct fuse_inode *fi;
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		return -ENOENT;

	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	spin_unlock(&fi->lock);

	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_SHIFT;
		if (len <= 0)
			pg_end = -1;
		else
			pg_end = (offset + len - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}
bool fuse_lock_inode(struct inode *inode)
{
	bool locked = false;

	if (!get_fuse_conn(inode)->parallel_dirops) {
		mutex_lock(&get_fuse_inode(inode)->mutex);
		locked = true;
	}

	return locked;
}
void fuse_unlock_inode(struct inode *inode, bool locked)
{
	if (locked)
		mutex_unlock(&get_fuse_inode(inode)->mutex);
}
static void fuse_umount_begin(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->no_force_umount)
		return;

	fuse_abort_conn(fc);

	// Only retire block-device-based superblocks.
	if (sb->s_bdev != NULL)
		retire_super(sb);
}
static void fuse_send_destroy(struct fuse_mount *fm)
{
	if (fm->fc->conn_init) {
		FUSE_ARGS(args);

		args.opcode = FUSE_DESTROY;
		args.force = true;
		args.nocreds = true;
		fuse_simple_request(fm, &args);
	}
}
static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
	stbuf->f_type    = FUSE_SUPER_MAGIC;
	stbuf->f_bsize   = attr->bsize;
	stbuf->f_frsize  = attr->frsize;
	stbuf->f_blocks  = attr->blocks;
	stbuf->f_bfree   = attr->bfree;
	stbuf->f_bavail  = attr->bavail;
	stbuf->f_files   = attr->files;
	stbuf->f_ffree   = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
}
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fm->fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.in_numargs = 0;
	args.opcode = FUSE_STATFS;
	args.nodeid = get_node_id(d_inode(dentry));
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}
static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
{
	struct fuse_sync_bucket *bucket;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
	init_waitqueue_head(&bucket->waitq);
	/* Initial active count */
	atomic_set(&bucket->count, 1);

	return bucket;
}
static void fuse_sync_fs_writes(struct fuse_conn *fc)
{
	struct fuse_sync_bucket *bucket, *new_bucket;
	int count;

	new_bucket = fuse_sync_bucket_alloc();
	spin_lock(&fc->lock);
	bucket = rcu_dereference_protected(fc->curr_bucket, 1);
	count = atomic_read(&bucket->count);
	WARN_ON(count < 1);
	/* No outstanding writes? */
	if (count == 1) {
		spin_unlock(&fc->lock);
		kfree(new_bucket);
		return;
	}

	/*
	 * Completion of new bucket depends on completion of this bucket, so add
	 * one more count.
	 */
	atomic_inc(&new_bucket->count);
	rcu_assign_pointer(fc->curr_bucket, new_bucket);
	spin_unlock(&fc->lock);
	/*
	 * Drop initial active count. At this point if all writes in this and
	 * ancestor buckets complete, the count will go to zero and this task
	 * will be woken up.
	 */
	atomic_dec(&bucket->count);

	wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);

	/* Drop temp count on descendant bucket */
	fuse_sync_bucket_dec(new_bucket);
	kfree_rcu(bucket, rcu);
}
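/*
 * Illustrative walkthrough (not part of the original source): suppose the
 * current bucket has count == 3 (its initial reference plus two in-flight
 * writes). fuse_sync_fs_writes() installs a fresh bucket with count == 2
 * (initial reference plus the temporary dependency count), drops the old
 * bucket's initial reference down to 2, and sleeps until both writes
 * complete and the old bucket's count reaches zero.
 */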
static int fuse_sync_fs(struct super_block *sb, int wait)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct fuse_syncfs_in inarg;
	FUSE_ARGS(args);
	int err;

	/*
	 * Userspace cannot handle the wait == 0 case. Avoid a
	 * gratuitous roundtrip.
	 */
	if (!wait)
		return 0;

	/* The filesystem is being unmounted. Nothing to do. */
	if (!sb->s_root)
		return 0;

	if (!fc->sync_fs)
		return 0;

	fuse_sync_fs_writes(fc);

	memset(&inarg, 0, sizeof(inarg));
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.opcode = FUSE_SYNCFS;
	args.nodeid = get_node_id(sb->s_root->d_inode);
	args.out_numargs = 0;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->sync_fs = 0;
		err = 0;
	}

	return err;
}
enum {
	OPT_SOURCE,
	OPT_SUBTYPE,
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};
static const struct fs_parameter_spec fuse_fs_parameters[] = {
	fsparam_string	("source",		OPT_SOURCE),
	fsparam_u32	("fd",			OPT_FD),
	fsparam_u32oct	("rootmode",		OPT_ROOTMODE),
	fsparam_uid	("user_id",		OPT_USER_ID),
	fsparam_gid	("group_id",		OPT_GROUP_ID),
	fsparam_flag	("default_permissions",	OPT_DEFAULT_PERMISSIONS),
	fsparam_flag	("allow_other",		OPT_ALLOW_OTHER),
	fsparam_u32	("max_read",		OPT_MAX_READ),
	fsparam_u32	("blksize",		OPT_BLKSIZE),
	fsparam_string	("subtype",		OPT_SUBTYPE),
	{}
};
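/*
 * Illustrative example (not part of the original source): a typical
 * libfuse-initiated mount passes options matching this table, e.g.
 *
 *	mount -t fuse -o fd=3,rootmode=40000,user_id=1000,group_id=1000 \
 *		myfs /mnt/point
 *
 * where "fd" is an already-opened /dev/fuse descriptor and "rootmode" is
 * the octal mode of the filesystem root; the fd number and ids above are
 * made up for the example.
 */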
static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;
	kuid_t kuid;
	kgid_t kgid;

	if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		/*
		 * Ignore options coming from mount(MS_REMOUNT) for backward
		 * compatibility.
		 */
		if (fsc->oldapi)
			return 0;

		return invalfc(fsc, "No changes allowed in reconfigure");
	}

	opt = fs_parse(fsc, fuse_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_SOURCE:
		if (fsc->source)
			return invalfc(fsc, "Multiple sources specified");
		fsc->source = param->string;
		param->string = NULL;
		break;

	case OPT_SUBTYPE:
		if (ctx->subtype)
			return invalfc(fsc, "Multiple subtypes specified");
		ctx->subtype = param->string;
		param->string = NULL;
		break;

	case OPT_FD:
		ctx->fd = result.uint_32;
		ctx->fd_present = true;
		break;

	case OPT_ROOTMODE:
		if (!fuse_valid_type(result.uint_32))
			return invalfc(fsc, "Invalid rootmode");
		ctx->rootmode = result.uint_32;
		ctx->rootmode_present = true;
		break;

	case OPT_USER_ID:
		kuid = result.uid;
		/*
		 * The requested uid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kuid_has_mapping(fsc->user_ns, kuid))
			return invalfc(fsc, "Invalid user_id");
		ctx->user_id = kuid;
		ctx->user_id_present = true;
		break;

	case OPT_GROUP_ID:
		kgid = result.gid;
		/*
		 * The requested gid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kgid_has_mapping(fsc->user_ns, kgid))
			return invalfc(fsc, "Invalid group_id");
		ctx->group_id = kgid;
		ctx->group_id_present = true;
		break;

	case OPT_DEFAULT_PERMISSIONS:
		ctx->default_permissions = true;
		break;

	case OPT_ALLOW_OTHER:
		ctx->allow_other = true;
		break;

	case OPT_MAX_READ:
		ctx->max_read = result.uint_32;
		break;

	case OPT_BLKSIZE:
		if (!ctx->is_bdev)
			return invalfc(fsc, "blksize only supported for fuseblk");
		ctx->blksize = result.uint_32;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static void fuse_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	if (ctx) {
		kfree(ctx->subtype);
		kfree(ctx);
	}
}
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->legacy_opts_show) {
		seq_printf(m, ",user_id=%u",
			   from_kuid_munged(fc->user_ns, fc->user_id));
		seq_printf(m, ",group_id=%u",
			   from_kgid_munged(fc->user_ns, fc->group_id));
		if (fc->default_permissions)
			seq_puts(m, ",default_permissions");
		if (fc->allow_other)
			seq_puts(m, ",allow_other");
		if (fc->max_read != ~0)
			seq_printf(m, ",max_read=%u", fc->max_read);
		if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
			seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	}
#ifdef CONFIG_FUSE_DAX
	if (fc->dax_mode == FUSE_DAX_ALWAYS)
		seq_puts(m, ",dax=always");
	else if (fc->dax_mode == FUSE_DAX_NEVER)
		seq_puts(m, ",dax=never");
	else if (fc->dax_mode == FUSE_DAX_INODE_USER)
		seq_puts(m, ",dax=inode");
#endif

	return 0;
}
static void fuse_iqueue_init(struct fuse_iqueue *fiq,
			     const struct fuse_iqueue_ops *ops,
			     void *priv)
{
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	spin_lock_init(&fiq->lock);
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	fiq->forget_list_tail = &fiq->forget_list_head;
	fiq->connected = 1;
	fiq->ops = ops;
	fiq->priv = priv;
}
static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
	unsigned int i;

	spin_lock_init(&fpq->lock);
	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
		INIT_LIST_HEAD(&fpq->processing[i]);
	INIT_LIST_HEAD(&fpq->io);
	fpq->connected = 1;
}
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	spin_lock_init(&fc->bg_lock);
	init_rwsem(&fc->killsb);
	refcount_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	atomic64_set(&fc->khctr, 0);
	fc->polled_files = RB_ROOT;
	fc->blocked = 0;
	fc->initialized = 0;
	fc->connected = 1;
	atomic64_set(&fc->attr_version, 1);
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
	fc->user_ns = get_user_ns(user_ns);
	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
	fc->max_pages_limit = FUSE_MAX_MAX_PAGES;

	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_backing_files_init(fc);

	INIT_LIST_HEAD(&fc->mounts);
	list_add(&fm->fc_entry, &fc->mounts);
	fm->fc = fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
static void delayed_release(struct rcu_head *p)
{
	struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);

	put_user_ns(fc->user_ns);
	fc->release(fc);
}
void fuse_conn_put(struct fuse_conn *fc)
{
	if (refcount_dec_and_test(&fc->count)) {
		struct fuse_iqueue *fiq = &fc->iq;
		struct fuse_sync_bucket *bucket;

		if (IS_ENABLED(CONFIG_FUSE_DAX))
			fuse_dax_conn_free(fc);
		if (fiq->ops->release)
			fiq->ops->release(fiq);
		put_pid_ns(fc->pid_ns);
		bucket = rcu_dereference_protected(fc->curr_bucket, 1);
		if (bucket) {
			WARN_ON(atomic_read(&bucket->count) != 1);
			kfree(bucket);
		}
		if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
			fuse_backing_files_free(fc);
		call_rcu(&fc->rcu, delayed_release);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	refcount_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));

	attr.mode = mode;
	attr.ino = FUSE_ROOT_ID;
	attr.nlink = 1;
	return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr, 0, 0);
}
struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};
static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
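/*
 * Illustrative layout (not part of the original source): a handle without
 * a parent occupies three 32-bit words, one with a parent six:
 *
 *	fh[0] = nodeid >> 32		fh[3] = parent nodeid >> 32
 *	fh[1] = nodeid & 0xffffffff	fh[4] = parent nodeid & 0xffffffff
 *	fh[2] = i_generation		fh[5] = parent i_generation
 *
 * fuse_fh_to_dentry() and fuse_fh_to_parent() below undo exactly this
 * packing.
 */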
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	struct fuse_inode_handle handle;

	if ((fh_type != FILEID_INO64_GEN &&
	     fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
		return NULL;

	handle.nodeid = (u64) fid->raw[0] << 32;
	handle.nodeid |= (u64) fid->raw[1];
	handle.generation = fid->raw[2];
	return fuse_get_dentry(sb, &handle);
}
static struct dentry *fuse_fh_to_parent(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	struct fuse_inode_handle parent;

	if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
		return NULL;

	parent.nodeid = (u64) fid->raw[3] << 32;
	parent.nodeid |= (u64) fid->raw[4];
	parent.generation = fid->raw[5];
	return fuse_get_dentry(sb, &parent);
}
static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &dotdot_name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}
/* only for fid encoding; no support for file handle */
static const struct export_operations fuse_export_fid_operations = {
	.encode_fh	= fuse_encode_fh,
};
static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};
static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.free_inode	= fuse_free_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	.drop_inode	= generic_delete_inode,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.sync_fs	= fuse_sync_fs,
	.show_options	= fuse_show_options,
};
static void sanitize_global_limit(unsigned *limit)
{
	/*
	 * The default maximum number of async requests is calculated to consume
	 * 1/2^13 of the total memory, assuming 392 bytes per request.
	 */
	if (*limit == 0)
		*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}
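/*
 * Worked example (illustrative, not part of the original source): on a
 * machine with 1 GiB of RAM, ((1 << 30) >> 13) / 392 = 131072 / 392 ~ 334,
 * so the default global limit is roughly 334 requests; whatever the memory
 * size, the value is capped at 2^16 - 1.
 */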
static int set_global_limit(const char *val, const struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned *)kp->arg);

	return 0;
}
static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	spin_lock(&fc->bg_lock);
	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
	spin_unlock(&fc->bg_lock);
}
struct fuse_init_args {
	struct fuse_args args;
	struct fuse_init_in in;
	struct fuse_init_out out;
};
static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
	struct fuse_init_out *arg = &ia->out;
	bool ok = true;

	if (error || arg->major != FUSE_KERNEL_VERSION)
		ok = false;
	else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			u64 flags = arg->flags;

			if (flags & FUSE_INIT_EXT)
				flags |= (u64) arg->flags2 << 32;

			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				if (!(flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			else if (flags & FUSE_EXPLICIT_INVAL_DATA)
				fc->explicit_inval_data = 1;
			if (flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fm->sb->s_time_gran = arg->time_gran;
			if ((flags & FUSE_POSIX_ACL)) {
				fc->default_permissions = 1;
				fc->posix_acl = 1;
			}
			if (flags & FUSE_CACHE_SYMLINKS)
				fc->cache_symlinks = 1;
			if (flags & FUSE_ABORT_ERROR)
				fc->abort_err = 1;
			if (flags & FUSE_MAX_PAGES) {
				fc->max_pages =
					min_t(unsigned int, fc->max_pages_limit,
					max_t(unsigned int, arg->max_pages, 1));
			}
			if (IS_ENABLED(CONFIG_FUSE_DAX)) {
				if (flags & FUSE_MAP_ALIGNMENT &&
				    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
					ok = false;
				}
				if (flags & FUSE_HAS_INODE_DAX)
					fc->inode_dax = 1;
			}
			if (flags & FUSE_HANDLE_KILLPRIV_V2) {
				fc->handle_killpriv_v2 = 1;
				fm->sb->s_flags |= SB_NOSEC;
			}
			if (flags & FUSE_SETXATTR_EXT)
				fc->setxattr_ext = 1;
			if (flags & FUSE_SECURITY_CTX)
				fc->init_security = 1;
			if (flags & FUSE_CREATE_SUPP_GROUP)
				fc->create_supp_group = 1;
			if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
				fc->direct_io_allow_mmap = 1;
			/*
			 * max_stack_depth is the max stack depth of FUSE fs,
			 * so it has to be at least 1 to support passthrough
			 * to backing files.
			 *
			 * with max_stack_depth > 1, the backing files can be
			 * on a stacked fs (e.g. overlayfs) themselves and with
			 * max_stack_depth == 1, FUSE fs can be stacked as the
			 * underlying fs of a stacked fs (e.g. overlayfs).
			 *
			 * Also don't allow the combination of FUSE_PASSTHROUGH
			 * and FUSE_WRITEBACK_CACHE, current design doesn't handle
			 * them together.
			 */
			if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) &&
			    (flags & FUSE_PASSTHROUGH) &&
			    arg->max_stack_depth > 0 &&
			    arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH &&
			    !(flags & FUSE_WRITEBACK_CACHE)) {
				fc->passthrough = 1;
				fc->max_stack_depth = arg->max_stack_depth;
				fm->sb->s_stack_depth = arg->max_stack_depth;
			}
			if (flags & FUSE_NO_EXPORT_SUPPORT)
				fm->sb->s_export_op = &fuse_export_fid_operations;
			if (flags & FUSE_ALLOW_IDMAP) {
				if (fc->default_permissions)
					fm->sb->s_iflags &= ~SB_I_NOIDMAP;
				else
					ok = false;
			}
		} else {
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fm->sb->s_bdi->ra_pages =
				min(fm->sb->s_bdi->ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	kfree(ia);

	if (!ok) {
		fc->conn_init = 0;
		fc->conn_error = 1;
	}

	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
}
void fuse_send_init(struct fuse_mount *fm)
{
	struct fuse_init_args *ia;
	u64 flags;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);

	ia->in.major = FUSE_KERNEL_VERSION;
	ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
	ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
	flags =
		FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
		FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
		FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
		FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
		FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
#ifdef CONFIG_FUSE_DAX
	if (fm->fc->dax)
		flags |= FUSE_MAP_ALIGNMENT;
	if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
		flags |= FUSE_HAS_INODE_DAX;
#endif
	if (fm->fc->auto_submounts)
		flags |= FUSE_SUBMOUNTS;
	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		flags |= FUSE_PASSTHROUGH;

	ia->in.flags = flags;
	ia->in.flags2 = flags >> 32;

	ia->args.opcode = FUSE_INIT;
	ia->args.in_numargs = 1;
	ia->args.in_args[0].size = sizeof(ia->in);
	ia->args.in_args[0].value = &ia->in;
	ia->args.out_numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	ia->args.out_argvar = true;
	ia->args.out_args[0].size = sizeof(ia->out);
	ia->args.out_args[0].value = &ia->out;
	ia->args.force = true;
	ia->args.nocreds = true;
	ia->args.end = process_init_reply;

	if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
		process_init_reply(fm, &ia->args, -ENOTCONN);
}
EXPORT_SYMBOL_GPL(fuse_send_init);
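/*
 * Illustrative note (not part of the original source): the feature mask is
 * 64 bits wide but fuse_init_in carries two 32-bit fields, so the upper
 * word goes into flags2 = flags >> 32, and FUSE_INIT_EXT (set above) tells
 * the server that flags2 is meaningful. FUSE_SECURITY_CTX, for instance,
 * is assumed here to be one of the bits living above bit 31.
 */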
void fuse_free_conn(struct fuse_conn *fc)
{
	WARN_ON(!list_empty(&fc->devices));
	kfree(fc);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;
	char *suffix = "";

	if (sb->s_bdev) {
		suffix = "-fuseblk";
		/*
		 * sb->s_bdi points to blkdev's bdi however we want to redirect
		 * it to our private bdi...
		 */
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
	err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
				   MINOR(fc->dev), suffix);
	if (err)
		return err;

	/* fuse does its own writeback accounting */
	sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
	sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 *    /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(sb->s_bdi, 1);

	return 0;
}
struct fuse_dev *fuse_dev_alloc(void)
{
	struct fuse_dev *fud;
	struct list_head *pq;

	fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
	if (!fud)
		return NULL;

	pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
	if (!pq) {
		kfree(fud);
		return NULL;
	}

	fud->pq.processing = pq;
	fuse_pqueue_init(&fud->pq);

	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
{
	fud->fc = fuse_conn_get(fc);
	spin_lock(&fc->lock);
	list_add_tail(&fud->entry, &fc->devices);
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_dev_install);
struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
{
	struct fuse_dev *fud;

	fud = fuse_dev_alloc();
	if (!fud)
		return NULL;

	fuse_dev_install(fud, fc);
	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);
void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud->pq.processing);
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
				      const struct fuse_inode *fi)
{
	struct timespec64 atime = inode_get_atime(&fi->inode);
	struct timespec64 mtime = inode_get_mtime(&fi->inode);
	struct timespec64 ctime = inode_get_ctime(&fi->inode);

	*attr = (struct fuse_attr){
		.ino		= fi->inode.i_ino,
		.size		= fi->inode.i_size,
		.blocks		= fi->inode.i_blocks,
		.atime		= atime.tv_sec,
		.mtime		= mtime.tv_sec,
		.ctime		= ctime.tv_sec,
		.atimensec	= atime.tv_nsec,
		.mtimensec	= mtime.tv_nsec,
		.ctimensec	= ctime.tv_nsec,
		.mode		= fi->inode.i_mode,
		.nlink		= fi->inode.i_nlink,
		.uid		= __kuid_val(fi->inode.i_uid),
		.gid		= __kgid_val(fi->inode.i_gid),
		.rdev		= fi->inode.i_rdev,
		.blksize	= 1u << fi->inode.i_blkbits,
	};
}
static void fuse_sb_defaults(struct super_block *sb)
{
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
	sb->s_iflags |= SB_I_NOIDMAP;
	if (sb->s_user_ns != &init_user_ns)
		sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
	sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
}
static int fuse_fill_super_submount(struct super_block *sb,
				    struct fuse_inode *parent_fi)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct super_block *parent_sb = parent_fi->inode.i_sb;
	struct fuse_attr root_attr;
	struct inode *root;
	struct fuse_submount_lookup *sl;
	struct fuse_inode *fi;

	fuse_sb_defaults(sb);
	fm->sb = sb;

	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi_get(parent_sb->s_bdi);

	sb->s_xattr = parent_sb->s_xattr;
	sb->s_export_op = parent_sb->s_export_op;
	sb->s_time_gran = parent_sb->s_time_gran;
	sb->s_blocksize = parent_sb->s_blocksize;
	sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
	sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
	if (parent_sb->s_subtype && !sb->s_subtype)
		return -ENOMEM;

	fuse_fill_attr_from_inode(&root_attr, parent_fi);
	root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
	/*
	 * This inode is just a duplicate, so it is not looked up and
	 * its nlookup should not be incremented.  fuse_iget() does
	 * that, though, so undo it here.
	 */
	fi = get_fuse_inode(root);
	fi->nlookup--;

	sb->s_d_op = &fuse_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;

	/*
	 * Grab the parent's submount_lookup pointer and take a
	 * reference on the shared nlookup from the parent.  This is to
	 * prevent the last forget for this nodeid from getting
	 * triggered until all users have finished with it.
	 */
	sl = parent_fi->submount_lookup;
	if (sl) {
		refcount_inc(&sl->count);
		fi->submount_lookup = sl;
	}

	return 0;
}
/* Filesystem context private data holds the FUSE inode of the mount point */
static int fuse_get_tree_submount(struct fs_context *fsc)
{
	struct fuse_mount *fm;
	struct fuse_inode *mp_fi = fsc->fs_private;
	struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
	struct super_block *sb;
	int err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		return -ENOMEM;

	fm->fc = fuse_conn_get(fc);
	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, NULL, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* Initialize superblock, making @mp_fi its root */
	err = fuse_fill_super_submount(sb, mp_fi);
	if (err) {
		deactivate_locked_super(sb);
		return err;
	}

	down_write(&fc->killsb);
	list_add_tail(&fm->fc_entry, &fc->mounts);
	up_write(&fc->killsb);

	sb->s_flags |= SB_ACTIVE;
	fsc->root = dget(sb->s_root);

	return 0;
}
= {
1682 .get_tree
= fuse_get_tree_submount
,
int fuse_init_fs_context_submount(struct fs_context *fsc)
{
	fsc->ops = &fuse_context_submount_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
	struct fuse_dev *fud = NULL;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct inode *root;
	struct dentry *root_dentry;
	int err;

	err = -EINVAL;
	if (sb->s_flags & SB_MANDLOCK)
		goto err;

	rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
	fuse_sb_defaults(sb);

	if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
		err = -EINVAL;
		if (!sb_set_blocksize(sb, ctx->blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}

	sb->s_subtype = ctx->subtype;
	ctx->subtype = NULL;
	if (IS_ENABLED(CONFIG_FUSE_DAX)) {
		err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
		if (err)
			goto err;
	}

	if (ctx->fudptr) {
		err = -ENOMEM;
		fud = fuse_dev_alloc_install(fc);
		if (!fud)
			goto err_free_dax;
	}

	fc->dev = sb->s_dev;
	fm->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & SB_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= SB_POSIXACL;

	fc->default_permissions = ctx->default_permissions;
	fc->allow_other = ctx->allow_other;
	fc->user_id = ctx->user_id;
	fc->group_id = ctx->group_id;
	fc->legacy_opts_show = ctx->legacy_opts_show;
	fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
	fc->destroy = ctx->destroy;
	fc->no_control = ctx->no_control;
	fc->no_force_umount = ctx->no_force_umount;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, ctx->rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (ctx->fudptr && *ctx->fudptr)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	if (ctx->fudptr)
		*ctx->fudptr = fud;
	mutex_unlock(&fuse_mutex);
	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
	dput(root_dentry);
 err_dev_free:
	if (fud)
		fuse_dev_free(fud);
 err_free_dax:
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_conn_free(fc);
 err:
	return err;
}
EXPORT_SYMBOL_GPL(fuse_fill_super_common);
static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	int err;

	if (!ctx->file || !ctx->rootmode_present ||
	    !ctx->user_id_present || !ctx->group_id_present)
		return -EINVAL;

	/*
	 * Require mount to happen from the same user namespace which
	 * opened /dev/fuse to prevent potential attacks.
	 */
	if ((ctx->file->f_op != &fuse_dev_operations) ||
	    (ctx->file->f_cred->user_ns != sb->s_user_ns))
		return -EINVAL;
	ctx->fudptr = &ctx->file->private_data;

	err = fuse_fill_super_common(sb, ctx);
	if (err)
		return err;
	/* file->private_data shall be visible on all CPUs after this */
	smp_mb();
	fuse_send_init(get_fuse_mount_super(sb));
	return 0;
}
/*
 * This is the path where the user supplied an already initialized fuse dev.
 * In this case never create a new super if the old one is gone.
 */
static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc)
{
	return -ENOTCONN;
}
static int fuse_test_super(struct super_block *sb, struct fs_context *fsc)
{
	return fsc->sget_key == get_fuse_conn_super(sb);
}
static int fuse_get_tree(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct fuse_mount *fm;
	struct super_block *sb;
	int err;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		kfree(fc);
		return -ENOMEM;
	}

	fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
	fc->release = fuse_free_conn;

	fsc->s_fs_info = fm;

	if (ctx->fd_present)
		ctx->file = fget(ctx->fd);

	if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
		err = get_tree_bdev(fsc, fuse_fill_super);
		goto out;
	}
	/*
	 * While block dev mount can be initialized with a dummy device fd
	 * (found by device name), normal fuse mounts can't
	 */
	err = -EINVAL;
	if (!ctx->file)
		goto out;

	/*
	 * Allow creating a fuse mount with an already initialized fuse
	 * connection
	 */
	fud = READ_ONCE(ctx->file->private_data);
	if (ctx->file->f_op == &fuse_dev_operations && fud) {
		fsc->sget_key = fud->fc;
		sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
		err = PTR_ERR_OR_ZERO(sb);
		if (!IS_ERR(sb))
			fsc->root = dget(sb->s_root);
	} else {
		err = get_tree_nodev(fsc, fuse_fill_super);
	}
out:
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (ctx->file)
		fput(ctx->file);
	return err;
}
static const struct fs_context_operations fuse_context_ops = {
	.free		= fuse_free_fsc,
	.parse_param	= fuse_parse_param,
	.reconfigure	= fuse_reconfigure,
	.get_tree	= fuse_get_tree,
};
/*
 * Set up the filesystem mount context.
 */
static int fuse_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_read = ~0;
	ctx->blksize = FUSE_DEFAULT_BLKSIZE;
	ctx->legacy_opts_show = true;

#ifdef CONFIG_BLOCK
	if (fsc->fs_type == &fuseblk_fs_type) {
		ctx->is_bdev = true;
		ctx->destroy = true;
	}
#endif

	fsc->fs_private = ctx;
	fsc->ops = &fuse_context_ops;
	return 0;
}
bool fuse_mount_remove(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	bool last = false;

	down_write(&fc->killsb);
	list_del_init(&fm->fc_entry);
	if (list_empty(&fc->mounts))
		last = true;
	up_write(&fc->killsb);

	return last;
}
EXPORT_SYMBOL_GPL(fuse_mount_remove);
void fuse_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;

	if (fc->destroy)
		fuse_send_destroy(fm);

	fuse_abort_conn(fc);
	fuse_wait_aborted(fc);

	if (!list_empty(&fc->entry)) {
		mutex_lock(&fuse_mutex);
		list_del(&fc->entry);
		fuse_ctl_remove_conn(fc);
		mutex_unlock(&fuse_mutex);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_destroy);
static void fuse_sb_destroy(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			fuse_conn_destroy(fm);
	}
}
void fuse_mount_destroy(struct fuse_mount *fm)
{
	fuse_conn_put(fm->fc);
	kfree_rcu(fm, rcu);
}
EXPORT_SYMBOL(fuse_mount_destroy);
static void fuse_kill_sb_anon(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}
static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
static void fuse_kill_sb_blk(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_block_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}
static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("fuseblk");
static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
static void fuse_inode_init_once(void *foo)
{
	struct inode *inode = foo;

	inode_init_once(inode);
}
static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
			sizeof(struct fuse_inode), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
			fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	return 0;

 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}
static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}
static struct kobject *fuse_kobj;

static int fuse_sysfs_init(void)
{
	int err;

	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
	if (!fuse_kobj) {
		err = -ENOMEM;
		goto out_err;
	}

	err = sysfs_create_mount_point(fuse_kobj, "connections");
	if (err)
		goto out_fuse_unregister;

	return 0;

 out_fuse_unregister:
	kobject_put(fuse_kobj);
 out_err:
	return err;
}
static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}
static int __init fuse_init(void)
{
	int res;

	pr_info("init (API version %i.%i)\n",
		FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}
static void __exit fuse_exit(void)
{
	pr_debug("exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}
module_init(fuse_init);
module_exit(fuse_exit);