/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/pid_namespace.h>

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, const struct kernel_param *kp);

unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

#define FUSE_SUPER_MAGIC 0x65735546

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
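
/*
 * With these defaults the threshold works out to 12 * 3 / 4 = 9, i.e. the
 * connection is treated as congested once 9 of the 12 background request
 * slots are in use.
 */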

static struct file_system_type fuseblk_fs_type;

struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}

static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct fuse_inode *fi;

	fi = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;

	mutex_init(&fi->mutex);
	init_rwsem(&fi->i_mmap_sem);
	spin_lock_init(&fi->lock);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget)
		goto out_free;

	if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
		goto out_free_forget;

	return &fi->inode;

out_free_forget:
	kfree(fi->forget);
out_free:
	kmem_cache_free(fuse_inode_cachep, fi);
	return NULL;
}

static void fuse_free_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	mutex_destroy(&fi->mutex);
	kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
	kfree(fi->dax);
#endif
	kmem_cache_free(fuse_inode_cachep, fi);
}

static void fuse_evict_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (FUSE_IS_DAX(inode))
			fuse_dax_inode_cleanup(inode);
		fuse_queue_forget(fc, fi->forget, fi->nodeid,
				  fi->nlookup);
		fi->forget = NULL;
	}
	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
		WARN_ON(!list_empty(&fi->write_files));
		WARN_ON(!list_empty(&fi->queued_writes));
	}
}

static int fuse_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;

	sync_filesystem(sb);
	if (fc->sb_flags & SB_MANDLOCK)
		return -EINVAL;

	return 0;
}

/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}
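
/*
 * Example on a 32-bit arch (sizeof(ino_t) == 4): for ino64 = 0x0000000500000001
 * the result is 0x00000001 ^ 0x00000005 = 0x00000004, folding the high and low
 * 32 bits together instead of silently truncating the upper half.
 */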

void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   u64 attr_valid)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	lockdep_assert_held(&fi->lock);

	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	fi->i_time = attr_valid;
	WRITE_ONCE(fi->inval_mask, 0);

	inode->i_ino     = fuse_squash_ino(attr->ino);
	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid     = make_kuid(fc->user_ns, attr->uid);
	inode->i_gid     = make_kgid(fc->user_ns, attr->gid);
	inode->i_blocks  = attr->blocks;
	inode->i_atime.tv_sec   = attr->atime;
	inode->i_atime.tv_nsec  = attr->atimensec;
	/* mtime from server may be stale due to local buffered write */
	if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) {
		inode->i_mtime.tv_sec   = attr->mtime;
		inode->i_mtime.tv_nsec  = attr->mtimensec;
		inode->i_ctime.tv_sec   = attr->ctime;
		inode->i_ctime.tv_nsec  = attr->ctimensec;
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions.  This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;

	/*
	 * We are refreshing inode data and it is possible that another
	 * client set suid/sgid or security.capability xattr. So clear
	 * S_NOSEC. Ideally, we could have cleared it only if suid/sgid
	 * was set or if security.capability xattr was set. But we don't
	 * know if security.capability has been set or not. So clear it
	 * anyway. It's less efficient but should be safe.
	 */
	inode->i_flags &= ~S_NOSEC;
}

void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool is_wb = fc->writeback_cache;
	loff_t oldsize;
	struct timespec64 old_mtime;

	spin_lock(&fi->lock);
	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fi->lock);
		return;
	}

	old_mtime = inode->i_mtime;
	fuse_change_attributes_common(inode, attr, attr_valid);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!is_wb || !S_ISREG(inode->i_mode))
		i_size_write(inode, attr->size);
	spin_unlock(&fi->lock);

	if (!is_wb && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			if (!fc->explicit_inval_data)
				inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec64 new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec64_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}
}
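
/*
 * Taken together with fuse_change_attributes_common(): with writeback_cache
 * the locally cached size/mtime of a regular file are trusted over the
 * server's values, while without it a size change truncates the page cache
 * and (unless explicit_inval_data is set) invalidates it, and auto_inval_data
 * extends that invalidation to a changed mtime.
 */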

static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode->i_mtime.tv_sec  = attr->mtime;
	inode->i_mtime.tv_nsec = attr->mtimensec;
	inode->i_ctime.tv_sec  = attr->ctime;
	inode->i_ctime.tv_nsec = attr->ctimensec;
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	}
}

static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
	return 0;
}
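
/*
 * fuse_inode_eq() and fuse_inode_set() are the test/set callbacks passed to
 * iget5_locked()/ilookup5(): inodes are hashed and looked up by FUSE nodeid
 * rather than by inode number.
 */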

struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	/*
	 * Auto mount points get their node id from the submount root, which is
	 * not a unique identifier within this filesystem.
	 *
	 * To avoid conflicts, do not place submount points into the inode hash
	 * table.
	 */
	if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
	    S_ISDIR(attr->mode)) {
		inode = new_inode(sb);
		if (!inode)
			return NULL;

		fuse_init_inode(inode, attr);
		get_fuse_inode(inode)->nodeid = nodeid;
		inode->i_flags |= S_AUTOMOUNT;
		return inode;
	}

	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr);
		unlock_new_inode(inode);
	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
		/* Inode has changed type, any I/O on the old should fail */
		fuse_make_bad(inode);
	}

	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->nlookup++;
	spin_unlock(&fi->lock);
	fuse_change_attributes(inode, attr, attr_valid, attr_version);

	return inode;
}

struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_mount **fm)
{
	struct fuse_mount *fm_iter;
	struct inode *inode;

	WARN_ON(!rwsem_is_locked(&fc->killsb));
	list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
		inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			if (fm)
				*fm = fm_iter;
			return inode;
		}
	}

	return NULL;
}

int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct fuse_inode *fi;
	struct inode *inode;
	pgoff_t pg_start, pg_end;

	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		return -ENOENT;

	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	spin_unlock(&fi->lock);

	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_SHIFT;
		if (len <= 0)
			pg_end = -1;
		else
			pg_end = (offset + len - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}

bool fuse_lock_inode(struct inode *inode)
{
	bool locked = false;

	if (!get_fuse_conn(inode)->parallel_dirops) {
		mutex_lock(&get_fuse_inode(inode)->mutex);
		locked = true;
	}

	return locked;
}

void fuse_unlock_inode(struct inode *inode, bool locked)
{
	if (locked)
		mutex_unlock(&get_fuse_inode(inode)->mutex);
}

static void fuse_umount_begin(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (!fc->no_force_umount)
		fuse_abort_conn(fc);
}

static void fuse_send_destroy(struct fuse_mount *fm)
{
	if (fm->fc->conn_init) {
		FUSE_ARGS(args);

		args.opcode = FUSE_DESTROY;
		fuse_simple_request(fm, &args);
	}
}

static void fuse_put_super(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);

	fuse_conn_put(fm->fc);
}

static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
	stbuf->f_type    = FUSE_SUPER_MAGIC;
	stbuf->f_bsize   = attr->bsize;
	stbuf->f_frsize  = attr->frsize;
	stbuf->f_blocks  = attr->blocks;
	stbuf->f_bfree   = attr->bfree;
	stbuf->f_bavail  = attr->bavail;
	stbuf->f_files   = attr->files;
	stbuf->f_ffree   = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
}

static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fm->fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.opcode = FUSE_STATFS;
	args.nodeid = get_node_id(d_inode(dentry));
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}

enum {
	OPT_SOURCE,
	OPT_SUBTYPE,
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
};

static const struct fs_parameter_spec fuse_fs_parameters[] = {
	fsparam_string	("source",		OPT_SOURCE),
	fsparam_u32	("fd",			OPT_FD),
	fsparam_u32oct	("rootmode",		OPT_ROOTMODE),
	fsparam_u32	("user_id",		OPT_USER_ID),
	fsparam_u32	("group_id",		OPT_GROUP_ID),
	fsparam_flag	("default_permissions",	OPT_DEFAULT_PERMISSIONS),
	fsparam_flag	("allow_other",		OPT_ALLOW_OTHER),
	fsparam_u32	("max_read",		OPT_MAX_READ),
	fsparam_u32	("blksize",		OPT_BLKSIZE),
	fsparam_string	("subtype",		OPT_SUBTYPE),
	{}
};
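
/*
 * The table above parses the option string that a FUSE daemon (typically via
 * libfuse) passes at mount time; fd, rootmode, user_id and group_id are
 * mandatory (see fuse_get_tree()). An illustrative example:
 *   fd=4,rootmode=40000,user_id=1000,group_id=1000,default_permissions
 */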

static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fc->fs_private;
	int opt;

	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		/*
		 * Ignore options coming from mount(MS_REMOUNT) for backward
		 * compatibility.
		 */
		if (fc->oldapi)
			return 0;

		return invalfc(fc, "No changes allowed in reconfigure");
	}

	opt = fs_parse(fc, fuse_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_SOURCE:
		if (fc->source)
			return invalfc(fc, "Multiple sources specified");
		fc->source = param->string;
		param->string = NULL;
		break;

	case OPT_SUBTYPE:
		if (ctx->subtype)
			return invalfc(fc, "Multiple subtypes specified");
		ctx->subtype = param->string;
		param->string = NULL;
		break;

	case OPT_FD:
		ctx->fd = result.uint_32;
		ctx->fd_present = true;
		break;

	case OPT_ROOTMODE:
		if (!fuse_valid_type(result.uint_32))
			return invalfc(fc, "Invalid rootmode");
		ctx->rootmode = result.uint_32;
		ctx->rootmode_present = true;
		break;

	case OPT_USER_ID:
		ctx->user_id = make_kuid(fc->user_ns, result.uint_32);
		if (!uid_valid(ctx->user_id))
			return invalfc(fc, "Invalid user_id");
		ctx->user_id_present = true;
		break;

	case OPT_GROUP_ID:
		ctx->group_id = make_kgid(fc->user_ns, result.uint_32);
		if (!gid_valid(ctx->group_id))
			return invalfc(fc, "Invalid group_id");
		ctx->group_id_present = true;
		break;

	case OPT_DEFAULT_PERMISSIONS:
		ctx->default_permissions = true;
		break;

	case OPT_ALLOW_OTHER:
		ctx->allow_other = true;
		break;

	case OPT_MAX_READ:
		ctx->max_read = result.uint_32;
		break;

	case OPT_BLKSIZE:
		if (!ctx->is_bdev)
			return invalfc(fc, "blksize only supported for fuseblk");
		ctx->blksize = result.uint_32;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void fuse_free_fc(struct fs_context *fc)
{
	struct fuse_fs_context *ctx = fc->fs_private;

	if (ctx) {
		kfree(ctx->subtype);
		kfree(ctx);
	}
}

static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->legacy_opts_show) {
		seq_printf(m, ",user_id=%u",
			   from_kuid_munged(fc->user_ns, fc->user_id));
		seq_printf(m, ",group_id=%u",
			   from_kgid_munged(fc->user_ns, fc->group_id));
		if (fc->default_permissions)
			seq_puts(m, ",default_permissions");
		if (fc->allow_other)
			seq_puts(m, ",allow_other");
		if (fc->max_read != ~0)
			seq_printf(m, ",max_read=%u", fc->max_read);
		if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
			seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	}
#ifdef CONFIG_FUSE_DAX
	if (fc->dax)
		seq_puts(m, ",dax");
#endif

	return 0;
}

static void fuse_iqueue_init(struct fuse_iqueue *fiq,
			     const struct fuse_iqueue_ops *ops,
			     void *priv)
{
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	spin_lock_init(&fiq->lock);
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	fiq->forget_list_tail = &fiq->forget_list_head;
	fiq->ops = ops;
	fiq->priv = priv;
}

static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
	unsigned int i;

	spin_lock_init(&fpq->lock);
	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
		INIT_LIST_HEAD(&fpq->processing[i]);
	INIT_LIST_HEAD(&fpq->io);
}
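
/*
 * Note: requests that have been read by the server and are awaiting a reply
 * live in the per-device fpq->processing[] lists; spreading them over
 * FUSE_PQ_HASH_SIZE buckets keeps the lookup on reply short when many
 * requests are in flight.
 */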

void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	spin_lock_init(&fc->bg_lock);
	init_rwsem(&fc->killsb);
	refcount_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	atomic64_set(&fc->khctr, 0);
	fc->polled_files = RB_ROOT;
	atomic64_set(&fc->attr_version, 1);
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
	fc->user_ns = get_user_ns(user_ns);
	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;

	INIT_LIST_HEAD(&fc->mounts);
	list_add(&fm->fc_entry, &fc->mounts);
	fm->fc = fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);

void fuse_conn_put(struct fuse_conn *fc)
{
	if (refcount_dec_and_test(&fc->count)) {
		struct fuse_iqueue *fiq = &fc->iq;

		if (IS_ENABLED(CONFIG_FUSE_DAX))
			fuse_dax_conn_free(fc);
		if (fiq->ops->release)
			fiq->ops->release(fiq);
		put_pid_ns(fc->pid_ns);
		put_user_ns(fc->user_ns);
		fc->release(fc);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);

struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	refcount_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);

static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));

	attr.mode = mode;
	attr.ino = FUSE_ROOT_ID;
	attr.nlink = 1;
	return fuse_iget(sb, 1, 0, &attr, 0, 0);
}

struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};

static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}

static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? 0x82 : 0x81;
}
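
/*
 * File handle layout used by the export ops and decoded again in
 * fuse_fh_to_dentry()/fuse_fh_to_parent(): fh[0..2] carry the nodeid
 * (high word, low word) and generation of the inode, fh[3..5] the same
 * triple for the parent; fh_type 0x81 means "no parent", 0x82 "with parent".
 */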

static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	struct fuse_inode_handle handle;

	if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
		return NULL;

	handle.nodeid = (u64) fid->raw[0] << 32;
	handle.nodeid |= (u64) fid->raw[1];
	handle.generation = fid->raw[2];
	return fuse_get_dentry(sb, &handle);
}

static struct dentry *fuse_fh_to_parent(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	struct fuse_inode_handle parent;

	if (fh_type != 0x82 || fh_len < 6)
		return NULL;

	parent.nodeid = (u64) fid->raw[3] << 32;
	parent.nodeid |= (u64) fid->raw[4];
	parent.generation = fid->raw[5];
	return fuse_get_dentry(sb, &parent);
}

static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	const struct qstr name = QSTR_INIT("..", 2);
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}

static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};

static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.free_inode	= fuse_free_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= fuse_put_super,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.show_options	= fuse_show_options,
};

static void sanitize_global_limit(unsigned *limit)
{
	/*
	 * The default maximum number of async requests is calculated to consume
	 * 1/2^13 of the total memory, assuming 392 bytes per request.
	 */
	if (*limit == 0)
		*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}
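
/*
 * Worked example of the default above: on a machine with 1 GiB of RAM,
 * (2^30 >> 13) / 392 = 131072 / 392, i.e. roughly 334 background requests
 * per unprivileged user, and the value is always capped at 65535.
 */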

static int set_global_limit(const char *val, const struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned *)kp->arg);

	return 0;
}

static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	spin_lock(&fc->bg_lock);
	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
	spin_unlock(&fc->bg_lock);
}

struct fuse_init_args {
	struct fuse_args args;
	struct fuse_init_in in;
	struct fuse_init_out out;
};

static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
	struct fuse_init_out *arg = &ia->out;
	bool ok = true;

	if (error || arg->major != FUSE_KERNEL_VERSION)
		ok = false;
	else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (arg->flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(arg->flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(arg->flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				if (!(arg->flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (arg->flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (arg->flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (arg->flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (arg->flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (arg->flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			else if (arg->flags & FUSE_EXPLICIT_INVAL_DATA)
				fc->explicit_inval_data = 1;
			if (arg->flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (arg->flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (arg->flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (arg->flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (arg->flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (arg->flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fm->sb->s_time_gran = arg->time_gran;
			if ((arg->flags & FUSE_POSIX_ACL)) {
				fc->default_permissions = 1;
				fc->posix_acl = 1;
				fm->sb->s_xattr = fuse_acl_xattr_handlers;
			}
			if (arg->flags & FUSE_CACHE_SYMLINKS)
				fc->cache_symlinks = 1;
			if (arg->flags & FUSE_ABORT_ERROR)
				fc->abort_err = 1;
			if (arg->flags & FUSE_MAX_PAGES) {
				fc->max_pages =
					min_t(unsigned int, FUSE_MAX_MAX_PAGES,
					max_t(unsigned int, arg->max_pages, 1));
			}
			if (IS_ENABLED(CONFIG_FUSE_DAX) &&
			    arg->flags & FUSE_MAP_ALIGNMENT &&
			    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
				ok = false;
			}
			if (arg->flags & FUSE_HANDLE_KILLPRIV_V2) {
				fc->handle_killpriv_v2 = 1;
				fm->sb->s_flags |= SB_NOSEC;
			}
		} else {
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fm->sb->s_bdi->ra_pages =
				min(fm->sb->s_bdi->ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	kfree(ia);

	if (!ok) {
		fc->conn_init = 0;
		fc->conn_error = 1;
	}

	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
}
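
/*
 * Version negotiation above: a mismatched major version rejects the reply
 * outright, while the minor version only gates which fields and flags are
 * honoured (e.g. max_readahead needs >= 7.6, flock handling changed in 7.17),
 * so older userspace servers keep working.
 */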

void fuse_send_init(struct fuse_mount *fm)
{
	struct fuse_init_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);

	ia->in.major = FUSE_KERNEL_VERSION;
	ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
	ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
	ia->in.flags |=
		FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
		FUSE_HANDLE_KILLPRIV_V2;
#ifdef CONFIG_FUSE_DAX
	if (fm->fc->dax)
		ia->in.flags |= FUSE_MAP_ALIGNMENT;
#endif
	if (fm->fc->auto_submounts)
		ia->in.flags |= FUSE_SUBMOUNTS;

	ia->args.opcode = FUSE_INIT;
	ia->args.in_numargs = 1;
	ia->args.in_args[0].size = sizeof(ia->in);
	ia->args.in_args[0].value = &ia->in;
	ia->args.out_numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	ia->args.out_argvar = true;
	ia->args.out_args[0].size = sizeof(ia->out);
	ia->args.out_args[0].value = &ia->out;
	ia->args.force = true;
	ia->args.nocreds = true;
	ia->args.end = process_init_reply;

	if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
		process_init_reply(fm, &ia->args, -ENOTCONN);
}
EXPORT_SYMBOL_GPL(fuse_send_init);

void fuse_free_conn(struct fuse_conn *fc)
{
	WARN_ON(!list_empty(&fc->devices));
	kfree(fc);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);

static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;
	char *suffix = "";

	if (sb->s_bdev) {
		suffix = "-fuseblk";
		/*
		 * sb->s_bdi points to blkdev's bdi however we want to redirect
		 * it to our private bdi...
		 */
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
	err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
				   MINOR(fc->dev), suffix);
	if (err)
		return err;

	/* fuse does its own writeback accounting */
	sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
	sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 *    /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(sb->s_bdi, 1);

	return 0;
}

struct fuse_dev *fuse_dev_alloc(void)
{
	struct fuse_dev *fud;
	struct list_head *pq;

	fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
	if (!fud)
		return NULL;

	pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
	if (!pq) {
		kfree(fud);
		return NULL;
	}

	fud->pq.processing = pq;
	fuse_pqueue_init(&fud->pq);

	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);

void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
{
	fud->fc = fuse_conn_get(fc);
	spin_lock(&fc->lock);
	list_add_tail(&fud->entry, &fc->devices);
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_dev_install);

struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
{
	struct fuse_dev *fud;

	fud = fuse_dev_alloc();
	if (!fud)
		return NULL;

	fuse_dev_install(fud, fc);
	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);

void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud->pq.processing);
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);

static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
				      const struct fuse_inode *fi)
{
	*attr = (struct fuse_attr){
		.ino		= fi->inode.i_ino,
		.size		= fi->inode.i_size,
		.blocks		= fi->inode.i_blocks,
		.atime		= fi->inode.i_atime.tv_sec,
		.mtime		= fi->inode.i_mtime.tv_sec,
		.ctime		= fi->inode.i_ctime.tv_sec,
		.atimensec	= fi->inode.i_atime.tv_nsec,
		.mtimensec	= fi->inode.i_mtime.tv_nsec,
		.ctimensec	= fi->inode.i_ctime.tv_nsec,
		.mode		= fi->inode.i_mode,
		.nlink		= fi->inode.i_nlink,
		.uid		= fi->inode.i_uid.val,
		.gid		= fi->inode.i_gid.val,
		.rdev		= fi->inode.i_rdev,
		.blksize	= 1u << fi->inode.i_blkbits,
	};
}
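
/*
 * Only submounts need the helper above: a submount root duplicates an inode
 * that already exists in the parent filesystem, so fuse_fill_super_submount()
 * synthesizes its attributes from the in-core inode instead of asking the
 * server.
 */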

static void fuse_sb_defaults(struct super_block *sb)
{
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
	if (sb->s_user_ns != &init_user_ns)
		sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
	sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);

	/*
	 * If we are not in the initial user namespace posix
	 * acls must be translated.
	 */
	if (sb->s_user_ns != &init_user_ns)
		sb->s_xattr = fuse_no_acl_xattr_handlers;
}

int fuse_fill_super_submount(struct super_block *sb,
			     struct fuse_inode *parent_fi)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct super_block *parent_sb = parent_fi->inode.i_sb;
	struct fuse_attr root_attr;
	struct inode *root;

	fuse_sb_defaults(sb);
	fm->sb = sb;

	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi_get(parent_sb->s_bdi);

	sb->s_xattr = parent_sb->s_xattr;
	sb->s_time_gran = parent_sb->s_time_gran;
	sb->s_blocksize = parent_sb->s_blocksize;
	sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
	sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
	if (parent_sb->s_subtype && !sb->s_subtype)
		return -ENOMEM;

	fuse_fill_attr_from_inode(&root_attr, parent_fi);
	root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
	/*
	 * This inode is just a duplicate, so it is not looked up and
	 * its nlookup should not be incremented.  fuse_iget() does
	 * that, though, so undo it here.
	 */
	get_fuse_inode(root)->nlookup--;
	sb->s_d_op = &fuse_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;

	return 0;
}

int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
	struct fuse_dev *fud = NULL;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct inode *root;
	struct dentry *root_dentry;
	int err;

	err = -EINVAL;
	if (sb->s_flags & SB_MANDLOCK)
		goto err;

	fuse_sb_defaults(sb);

	if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
		err = -EINVAL;
		if (!sb_set_blocksize(sb, ctx->blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}

	sb->s_subtype = ctx->subtype;
	ctx->subtype = NULL;
	if (IS_ENABLED(CONFIG_FUSE_DAX)) {
		err = fuse_dax_conn_alloc(fc, ctx->dax_dev);
		if (err)
			goto err;
	}

	if (ctx->fudptr) {
		err = -ENOMEM;
		fud = fuse_dev_alloc_install(fc);
		if (!fud)
			goto err_free_dax;
	}

	fc->dev = sb->s_dev;
	fm->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & SB_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= SB_POSIXACL;

	fc->default_permissions = ctx->default_permissions;
	fc->allow_other = ctx->allow_other;
	fc->user_id = ctx->user_id;
	fc->group_id = ctx->group_id;
	fc->legacy_opts_show = ctx->legacy_opts_show;
	fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
	fc->destroy = ctx->destroy;
	fc->no_control = ctx->no_control;
	fc->no_force_umount = ctx->no_force_umount;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, ctx->rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (ctx->fudptr && *ctx->fudptr)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	if (ctx->fudptr)
		*ctx->fudptr = fud;
	mutex_unlock(&fuse_mutex);
	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
	dput(root_dentry);
 err_dev_free:
	if (fud)
		fuse_dev_free(fud);
 err_free_dax:
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_conn_free(fc);
 err:
	return err;
}
EXPORT_SYMBOL_GPL(fuse_fill_super_common);

static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	int err;
	struct file *file;
	struct fuse_conn *fc;
	struct fuse_mount *fm;

	err = -EINVAL;
	file = fget(ctx->fd);
	if (!file)
		goto err;

	/*
	 * Require mount to happen from the same user namespace which
	 * opened /dev/fuse to prevent potential attacks.
	 */
	if ((file->f_op != &fuse_dev_operations) ||
	    (file->f_cred->user_ns != sb->s_user_ns))
		goto err_fput;
	ctx->fudptr = &file->private_data;

	err = -ENOMEM;
	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		goto err_fput;

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		kfree(fc);
		goto err_fput;
	}

	fuse_conn_init(fc, fm, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
	fc->release = fuse_free_conn;

	sb->s_fs_info = fm;

	err = fuse_fill_super_common(sb, ctx);
	if (err)
		goto err_put_conn;
	/*
	 * atomic_dec_and_test() in fput() provides the necessary
	 * memory barrier for file->private_data to be visible on all
	 * CPUs after this
	 */
	fput(file);
	fuse_send_init(get_fuse_mount_super(sb));
	return 0;

 err_put_conn:
	fuse_conn_put(fc);
	kfree(fm);
	sb->s_fs_info = NULL;
 err_fput:
	fput(file);
 err:
	return err;
}

static int fuse_get_tree(struct fs_context *fc)
{
	struct fuse_fs_context *ctx = fc->fs_private;

	if (!ctx->fd_present || !ctx->rootmode_present ||
	    !ctx->user_id_present || !ctx->group_id_present)
		return -EINVAL;

#ifdef CONFIG_BLOCK
	if (ctx->is_bdev)
		return get_tree_bdev(fc, fuse_fill_super);
#endif

	return get_tree_nodev(fc, fuse_fill_super);
}

static const struct fs_context_operations fuse_context_ops = {
	.free		= fuse_free_fc,
	.parse_param	= fuse_parse_param,
	.reconfigure	= fuse_reconfigure,
	.get_tree	= fuse_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int fuse_init_fs_context(struct fs_context *fc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_read = ~0;
	ctx->blksize = FUSE_DEFAULT_BLKSIZE;
	ctx->legacy_opts_show = true;

#ifdef CONFIG_BLOCK
	if (fc->fs_type == &fuseblk_fs_type) {
		ctx->is_bdev = true;
		ctx->destroy = true;
	}
#endif

	fc->fs_private = ctx;
	fc->ops = &fuse_context_ops;
	return 0;
}

bool fuse_mount_remove(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	bool last = false;

	down_write(&fc->killsb);
	list_del_init(&fm->fc_entry);
	if (list_empty(&fc->mounts))
		last = true;
	up_write(&fc->killsb);

	return last;
}
EXPORT_SYMBOL_GPL(fuse_mount_remove);

void fuse_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;

	if (fc->destroy)
		fuse_send_destroy(fm);

	fuse_abort_conn(fc);
	fuse_wait_aborted(fc);

	if (!list_empty(&fc->entry)) {
		mutex_lock(&fuse_mutex);
		list_del(&fc->entry);
		fuse_ctl_remove_conn(fc);
		mutex_unlock(&fuse_mutex);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_destroy);

static void fuse_sb_destroy(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	if (fm) {
		last = fuse_mount_remove(fm);
		if (last)
			fuse_conn_destroy(fm);
	}
}

static void fuse_kill_sb_anon(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
}

static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
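
/*
 * Note: "fuse" above is the nodev variant used by ordinary userspace servers
 * (shown in /proc/mounts as e.g. "fuse.sshfs" thanks to FS_HAS_SUBTYPE),
 * while "fuseblk" below is the FS_REQUIRES_DEV variant chosen by servers
 * backed by a block device, such as ntfs-3g.
 */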

#ifdef CONFIG_BLOCK
static void fuse_kill_sb_blk(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_block_super(sb);
}

static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
MODULE_ALIAS_FS("fuseblk");

static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif

static void fuse_inode_init_once(void *foo)
{
	struct inode *inode = foo;

	inode_init_once(inode);
}

static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
			sizeof(struct fuse_inode), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
			fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	return 0;

 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}

static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}

static struct kobject *fuse_kobj;

static int fuse_sysfs_init(void)
{
	int err;

	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
	if (!fuse_kobj) {
		err = -ENOMEM;
		goto out_err;
	}

	err = sysfs_create_mount_point(fuse_kobj, "connections");
	if (err)
		goto out_fuse_unregister;

	return 0;

 out_fuse_unregister:
	kobject_put(fuse_kobj);
 out_err:
	return err;
}

static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}

static int __init fuse_init(void)
{
	int res;

	pr_info("init (API version %i.%i)\n",
		FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}

static void __exit fuse_exit(void)
{
	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);