/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, struct kernel_param *kp);

unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

#define FUSE_SUPER_MAGIC 0x65735546

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
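/*
 * With the defaults above this works out to 12 * 3 / 4 = 9: the
 * connection is treated as congested once nine background requests are
 * queued, well before the hard limit of twelve is reached.
 */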
struct fuse_mount_data {
	int fd;
	unsigned rootmode;
	kuid_t user_id;
	kgid_t group_id;
	unsigned fd_present:1;
	unsigned rootmode_present:1;
	unsigned user_id_present:1;
	unsigned group_id_present:1;
	unsigned flags;
	unsigned max_read;
	unsigned blksize;
};

struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
}

static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct inode *inode;
	struct fuse_inode *fi;

	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
	if (!inode)
		return NULL;

	fi = get_fuse_inode(inode);
	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	INIT_LIST_HEAD(&fi->writepages);
	init_waitqueue_head(&fi->page_waitq);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget) {
		kmem_cache_free(fuse_inode_cachep, inode);
		return NULL;
	}

	return inode;
}

static void fuse_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(fuse_inode_cachep, inode);
}

static void fuse_destroy_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	BUG_ON(!list_empty(&fi->write_files));
	BUG_ON(!list_empty(&fi->queued_writes));
	kfree(fi->forget);
	call_rcu(&inode->i_rcu, fuse_i_callback);
}

static void fuse_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	if (inode->i_sb->s_flags & MS_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
		fi->forget = NULL;
	}
}

static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
{
	if (*flags & MS_MANDLOCK)
		return -EINVAL;

	return 0;
}

/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}
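/*
 * For example, on a 32-bit arch (4-byte ino_t) a nodeid of
 * 0x0000000200000001 squashes to 0x00000001 ^ 0x00000002 == 0x00000003,
 * so both halves of the 64-bit value contribute to the inode number
 * reported to userspace.
 */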
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   u64 attr_valid)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	fi->attr_version = ++fc->attr_version;
	fi->i_time = attr_valid;

	inode->i_ino     = fuse_squash_ino(attr->ino);
	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid     = make_kuid(&init_user_ns, attr->uid);
	inode->i_gid     = make_kgid(&init_user_ns, attr->gid);
	inode->i_blocks  = attr->blocks;
	inode->i_atime.tv_sec   = attr->atime;
	inode->i_atime.tv_nsec  = attr->atimensec;
	inode->i_mtime.tv_sec   = attr->mtime;
	inode->i_mtime.tv_nsec  = attr->mtimensec;
	inode->i_ctime.tv_sec   = attr->ctime;
	inode->i_ctime.tv_nsec  = attr->ctimensec;

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions.  This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;
}

void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t oldsize;
	struct timespec old_mtime;

	spin_lock(&fc->lock);
	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fc->lock);
		return;
	}

	old_mtime = inode->i_mtime;
	fuse_change_attributes_common(inode, attr, attr_valid);

	oldsize = inode->i_size;
	i_size_write(inode, attr->size);
	spin_unlock(&fc->lock);

	if (S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			inval = true;
			truncate_pagecache(inode, attr->size);
		} else if (fc->auto_inval_data) {
			struct timespec new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}
}
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	}
}

int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
	return 0;
}

struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

 retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME|S_NOCMTIME;
		inode->i_generation = generation;
		inode->i_data.backing_dev_info = &fc->bdi;
		fuse_init_inode(inode, attr);
		unlock_new_inode(inode);
	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
		/* Inode has changed type, any I/O on the old should fail */
		make_bad_inode(inode);
		iput(inode);
		goto retry;
	}

	fi = get_fuse_inode(inode);
	spin_lock(&fc->lock);
	fi->nlookup++;
	spin_unlock(&fc->lock);
	fuse_change_attributes(inode, attr, attr_valid, attr_version);

	return inode;
}
int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		return -ENOENT;

	fuse_invalidate_attr(inode);
	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
	invalidate_inode_pages2_range(inode->i_mapping,
				      pg_start, pg_end);
	iput(inode);
	return 0;
}

static void fuse_umount_begin(struct super_block *sb)
{
	fuse_abort_conn(get_fuse_conn_super(sb));
}

static void fuse_send_destroy(struct fuse_conn *fc)
{
	struct fuse_req *req = fc->destroy_req;
	if (req && fc->conn_init) {
		fc->destroy_req = NULL;
		req->in.h.opcode = FUSE_DESTROY;
		req->force = 1;
		req->background = 0;
		fuse_request_send(fc, req);
		fuse_put_request(fc, req);
	}
}

static void fuse_bdi_destroy(struct fuse_conn *fc)
{
	if (fc->bdi_initialized)
		bdi_destroy(&fc->bdi);
}

void fuse_conn_kill(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	fc->connected = 0;
	fc->blocked = 0;
	spin_unlock(&fc->lock);
	/* Flush all readers on this fs */
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	wake_up_all(&fc->waitq);
	wake_up_all(&fc->blocked_waitq);
	wake_up_all(&fc->reserved_req_waitq);
}
EXPORT_SYMBOL_GPL(fuse_conn_kill);

static void fuse_put_super(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fuse_send_destroy(fc);

	fuse_conn_kill(fc);
	mutex_lock(&fuse_mutex);
	list_del(&fc->entry);
	fuse_ctl_remove_conn(fc);
	mutex_unlock(&fuse_mutex);
	fuse_bdi_destroy(fc);

	fuse_conn_put(fc);
}
static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
	stbuf->f_type    = FUSE_SUPER_MAGIC;
	stbuf->f_bsize   = attr->bsize;
	stbuf->f_frsize  = attr->frsize;
	stbuf->f_blocks  = attr->blocks;
	stbuf->f_bfree   = attr->bfree;
	stbuf->f_bavail  = attr->bavail;
	stbuf->f_files   = attr->files;
	stbuf->f_ffree   = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
}

static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct fuse_req *req;
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&outarg, 0, sizeof(outarg));
	req->in.h.opcode = FUSE_STATFS;
	req->in.h.nodeid = get_node_id(dentry->d_inode);
	req->out.numargs = 1;
	req->out.args[0].size =
		fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	fuse_put_request(fc, req);
	return err;
}
enum {
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};

static const match_table_t tokens = {
	{OPT_FD,			"fd=%u"},
	{OPT_ROOTMODE,			"rootmode=%o"},
	{OPT_USER_ID,			"user_id=%u"},
	{OPT_GROUP_ID,			"group_id=%u"},
	{OPT_DEFAULT_PERMISSIONS,	"default_permissions"},
	{OPT_ALLOW_OTHER,		"allow_other"},
	{OPT_MAX_READ,			"max_read=%u"},
	{OPT_BLKSIZE,			"blksize=%u"},
	{OPT_ERR,			NULL}
};
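/*
 * An illustrative option string as passed in by a fuse server (the
 * numeric values are made up):
 *
 *	fd=4,rootmode=40000,user_id=1000,group_id=1000,default_permissions
 *
 * fd, rootmode, user_id and group_id are mandatory (see the check at the
 * end of parse_fuse_opt() below); blksize is only accepted for fuseblk
 * mounts.
 */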
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
{
	char *p;
	memset(d, 0, sizeof(struct fuse_mount_data));
	d->max_read = ~0;
	d->blksize = FUSE_DEFAULT_BLKSIZE;

	while ((p = strsep(&opt, ",")) != NULL) {
		int token;
		int value;
		substring_t args[MAX_OPT_ARGS];
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case OPT_FD:
			if (match_int(&args[0], &value))
				return 0;
			d->fd = value;
			d->fd_present = 1;
			break;

		case OPT_ROOTMODE:
			if (match_octal(&args[0], &value))
				return 0;
			if (!fuse_valid_type(value))
				return 0;
			d->rootmode = value;
			d->rootmode_present = 1;
			break;

		case OPT_USER_ID:
			if (match_int(&args[0], &value))
				return 0;
			d->user_id = make_kuid(current_user_ns(), value);
			if (!uid_valid(d->user_id))
				return 0;
			d->user_id_present = 1;
			break;

		case OPT_GROUP_ID:
			if (match_int(&args[0], &value))
				return 0;
			d->group_id = make_kgid(current_user_ns(), value);
			if (!gid_valid(d->group_id))
				return 0;
			d->group_id_present = 1;
			break;

		case OPT_DEFAULT_PERMISSIONS:
			d->flags |= FUSE_DEFAULT_PERMISSIONS;
			break;

		case OPT_ALLOW_OTHER:
			d->flags |= FUSE_ALLOW_OTHER;
			break;

		case OPT_MAX_READ:
			if (match_int(&args[0], &value))
				return 0;
			d->max_read = value;
			break;

		case OPT_BLKSIZE:
			if (!is_bdev || match_int(&args[0], &value))
				return 0;
			d->blksize = value;
			break;

		default:
			return 0;
		}
	}

	if (!d->fd_present || !d->rootmode_present ||
	    !d->user_id_present || !d->group_id_present)
		return 0;

	return 1;
}
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	seq_printf(m, ",user_id=%u", from_kuid_munged(&init_user_ns, fc->user_id));
	seq_printf(m, ",group_id=%u", from_kgid_munged(&init_user_ns, fc->group_id));
	if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
		seq_puts(m, ",default_permissions");
	if (fc->flags & FUSE_ALLOW_OTHER)
		seq_puts(m, ",allow_other");
	if (fc->max_read != ~0)
		seq_printf(m, ",max_read=%u", fc->max_read);
	if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
		seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	return 0;
}
void fuse_conn_init(struct fuse_conn *fc)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	mutex_init(&fc->inst_mutex);
	init_rwsem(&fc->killsb);
	atomic_set(&fc->count, 1);
	init_waitqueue_head(&fc->waitq);
	init_waitqueue_head(&fc->blocked_waitq);
	init_waitqueue_head(&fc->reserved_req_waitq);
	INIT_LIST_HEAD(&fc->pending);
	INIT_LIST_HEAD(&fc->processing);
	INIT_LIST_HEAD(&fc->io);
	INIT_LIST_HEAD(&fc->interrupts);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	fc->forget_list_tail = &fc->forget_list_head;
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	fc->polled_files = RB_ROOT;
	fc->attr_version = 1;
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
}
EXPORT_SYMBOL_GPL(fuse_conn_init);

void fuse_conn_put(struct fuse_conn *fc)
{
	if (atomic_dec_and_test(&fc->count)) {
		if (fc->destroy_req)
			fuse_request_free(fc->destroy_req);
		mutex_destroy(&fc->inst_mutex);
		fc->release(fc);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);

struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	atomic_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);

static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));

	attr.mode = mode;
	attr.ino = FUSE_ROOT_ID;
	attr.nlink = 1;
	return fuse_iget(sb, 1, 0, &attr, 0, 0);
}
struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};

static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}

static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? 0x82 : 0x81;
}
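/*
 * The resulting file handle is three (or, with a parent, six) 32-bit
 * words:
 *
 *	fh[0] = child nodeid (high 32 bits)    fh[3] = parent nodeid (high)
 *	fh[1] = child nodeid (low 32 bits)     fh[4] = parent nodeid (low)
 *	fh[2] = child i_generation             fh[5] = parent i_generation
 *
 * fuse_fh_to_dentry() and fuse_fh_to_parent() below decode exactly this
 * layout, keyed on the 0x81/0x82 handle type returned here.
 */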
static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct fuse_inode_handle handle;

	if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
		return NULL;

	handle.nodeid = (u64) fid->raw[0] << 32;
	handle.nodeid |= (u64) fid->raw[1];
	handle.generation = fid->raw[2];
	return fuse_get_dentry(sb, &handle);
}

static struct dentry *fuse_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct fuse_inode_handle parent;

	if (fh_type != 0x82 || fh_len < 6)
		return NULL;

	parent.nodeid = (u64) fid->raw[3] << 32;
	parent.nodeid |= (u64) fid->raw[4];
	parent.generation = fid->raw[5];
	return fuse_get_dentry(sb, &parent);
}

static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = child->d_inode;
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	const struct qstr name = QSTR_INIT("..", 2);
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}

static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};

static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.destroy_inode	= fuse_destroy_inode,
	.evict_inode	= fuse_evict_inode,
	.drop_inode	= generic_delete_inode,
	.remount_fs	= fuse_remount_fs,
	.put_super	= fuse_put_super,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.show_options	= fuse_show_options,
};
static void sanitize_global_limit(unsigned *limit)
{
	if (*limit == 0)
		*limit = ((totalram_pages << PAGE_SHIFT) >> 13) /
			 sizeof(struct fuse_req);

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}
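/*
 * Rough illustration of the default: the background-request budget is
 * 1/8192th of RAM (the ">> 13").  On a 1 GiB machine that is 128 KiB;
 * divided by sizeof(struct fuse_req) (a few hundred bytes, depending on
 * the build) this yields a default limit in the low hundreds, always
 * capped at 65535.
 */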
static int set_global_limit(const char *val, struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned *)kp->arg);

	return 0;
}

static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
}
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_init_out *arg = &req->misc.init_out;

	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
		fc->conn_error = 1;
	else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
			if (arg->flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(arg->flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(arg->flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				if (!(arg->flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (arg->flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (arg->flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (arg->flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (arg->flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (arg->flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			if (arg->flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (arg->flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (arg->flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
		} else {
			ra_pages = fc->max_read / PAGE_CACHE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	wake_up_all(&fc->blocked_waitq);
}
static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_init_in *arg = &req->misc.init_in;

	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO;
	req->in.h.opcode = FUSE_INIT;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
	req->out.numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	req->out.argvar = 1;
	req->out.args[0].size = sizeof(struct fuse_init_out);
	req->out.args[0].value = &req->misc.init_out;
	req->end = process_init_reply;
	fuse_request_send_background(fc, req);
}
static void fuse_free_conn(struct fuse_conn *fc)
{
	kfree(fc);
}
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;

	fc->bdi.name = "fuse";
	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	/* fuse does its own writeback accounting */
	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;

	err = bdi_init(&fc->bdi);
	if (err)
		return err;

	fc->bdi_initialized = 1;

	if (sb->s_bdev) {
		err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
				   MAJOR(fc->dev), MINOR(fc->dev));
	} else {
		err = bdi_register_dev(&fc->bdi, fc->dev);
	}
	if (err)
		return err;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 *    /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(&fc->bdi, 1);

	return 0;
}
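/*
 * Worked example of the comment above: with 1 GiB of RAM and
 * vm.dirty_ratio = 10% the global dirty threshold is roughly 100 MiB;
 * capping this bdi at max_ratio = 1 (1%) therefore leaves about 1 MiB
 * of dirty + writeback pages for this filesystem.
 */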
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
	struct fuse_conn *fc;
	struct inode *root;
	struct fuse_mount_data d;
	struct file *file;
	struct dentry *root_dentry;
	struct fuse_req *init_req;
	int err;
	int is_bdev = sb->s_bdev != NULL;

	err = -EINVAL;
	if (sb->s_flags & MS_MANDLOCK)
		goto err;

	sb->s_flags &= ~MS_NOSEC;

	if (!parse_fuse_opt((char *) data, &d, is_bdev))
		goto err;

	if (is_bdev) {
#ifdef CONFIG_BLOCK
		if (!sb_set_blocksize(sb, d.blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_CACHE_SIZE;
		sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	}
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;

	file = fget(d.fd);
	err = -EINVAL;
	if (!file)
		goto err;

	if ((file->f_op != &fuse_dev_operations) ||
	    (file->f_cred->user_ns != &init_user_ns))
		goto err_fput;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	err = -ENOMEM;
	if (!fc)
		goto err_fput;

	fuse_conn_init(fc);

	fc->dev = sb->s_dev;
	fc->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_put_conn;

	sb->s_bdi = &fc->bdi;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & MS_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= MS_POSIXACL;

	fc->release = fuse_free_conn;
	fc->flags = d.flags;
	fc->user_id = d.user_id;
	fc->group_id = d.group_id;
	fc->max_read = max_t(unsigned, 4096, d.max_read);

	/* Used by get_root_inode() */
	sb->s_fs_info = fc;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, d.rootmode);
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_put_conn;
	/* only now - we want root dentry with NULL ->d_op */
	sb->s_d_op = &fuse_dentry_operations;

	init_req = fuse_request_alloc(0);
	if (!init_req)
		goto err_put_root;
	init_req->background = 1;

	if (is_bdev) {
		fc->destroy_req = fuse_request_alloc(0);
		if (!fc->destroy_req)
			goto err_free_init_req;
	}

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (file->private_data)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	fc->connected = 1;
	file->private_data = fuse_conn_get(fc);
	mutex_unlock(&fuse_mutex);
	/*
	 * atomic_dec_and_test() in fput() provides the necessary
	 * memory barrier for file->private_data to be visible on all
	 * CPUs after this
	 */
	fput(file);

	fuse_send_init(fc, init_req);

	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
 err_free_init_req:
	fuse_request_free(init_req);
 err_put_root:
	dput(root_dentry);
 err_put_conn:
	fuse_bdi_destroy(fc);
	fuse_conn_put(fc);
 err_fput:
	fput(file);
 err:
	return err;
}
static struct dentry *fuse_mount(struct file_system_type *fs_type,
		       int flags, const char *dev_name,
		       void *raw_data)
{
	return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
}

static void fuse_kill_sb_anon(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc) {
		down_write(&fc->killsb);
		fc->sb = NULL;
		up_write(&fc->killsb);
	}

	kill_anon_super(sb);
}

static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE,
	.mount		= fuse_mount,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
			   int flags, const char *dev_name,
			   void *raw_data)
{
	return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super);
}

static void fuse_kill_sb_blk(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc) {
		down_write(&fc->killsb);
		fc->sb = NULL;
		up_write(&fc->killsb);
	}

	kill_block_super(sb);
}

static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.mount		= fuse_mount_blk,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
MODULE_ALIAS_FS("fuseblk");

static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
static void fuse_inode_init_once(void *foo)
{
	struct inode *inode = foo;

	inode_init_once(inode);
}

static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
					      sizeof(struct fuse_inode),
					      0, SLAB_HWCACHE_ALIGN,
					      fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	return 0;

 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}
static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}
static struct kobject *fuse_kobj;
static struct kobject *connections_kobj;

static int fuse_sysfs_init(void)
{
	int err;

	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
	if (!fuse_kobj) {
		err = -ENOMEM;
		goto out_err;
	}

	connections_kobj = kobject_create_and_add("connections", fuse_kobj);
	if (!connections_kobj) {
		err = -ENOMEM;
		goto out_fuse_unregister;
	}

	return 0;

 out_fuse_unregister:
	kobject_put(fuse_kobj);
 out_err:
	return err;
}

static void fuse_sysfs_cleanup(void)
{
	kobject_put(connections_kobj);
	kobject_put(fuse_kobj);
}
static int __init fuse_init(void)
{
	int res;

	printk(KERN_INFO "fuse init (API version %i.%i)\n",
	       FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}
static void __exit fuse_exit(void)
{
	printk(KERN_DEBUG "fuse exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);