/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");
static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, struct kernel_param *kp);
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");
unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");
#define FUSE_SUPER_MAGIC 0x65735546
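/*
 * Side note (not in the original source): the magic value is the ASCII
 * bytes 'F' 'U' 's' 'e' (0x46 0x55 0x73 0x65) read as a little-endian
 * 32-bit word, which is why it shows up as "FUSe" in raw dumps of
 * statfs()->f_type.
 */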
#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
struct fuse_mount_data {
	int fd;
	unsigned rootmode;
	kuid_t user_id;
	kgid_t group_id;
	unsigned fd_present:1;
	unsigned rootmode_present:1;
	unsigned user_id_present:1;
	unsigned group_id_present:1;
	unsigned flags;
	unsigned max_read;
	unsigned blksize;
};
struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
}
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct inode *inode;
	struct fuse_inode *fi;

	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
	if (!inode)
		return NULL;

	fi = get_fuse_inode(inode);
	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	INIT_LIST_HEAD(&fi->writepages);
	init_waitqueue_head(&fi->page_waitq);
	mutex_init(&fi->mutex);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget) {
		kmem_cache_free(fuse_inode_cachep, inode);
		return NULL;
	}

	return inode;
}
static void fuse_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(fuse_inode_cachep, inode);
}
static void fuse_destroy_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	BUG_ON(!list_empty(&fi->write_files));
	BUG_ON(!list_empty(&fi->queued_writes));
	mutex_destroy(&fi->mutex);
	call_rcu(&inode->i_rcu, fuse_i_callback);
}
static void fuse_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_sb->s_flags & MS_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);

		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
	}
}
static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
{
	if (*flags & MS_MANDLOCK)
		return -EINVAL;

	return 0;
}
/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}
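/*
 * Illustrative example (not part of the original source): on a 32-bit
 * arch where sizeof(ino_t) == 4, a nodeid of 0x0000000200000001 squashes
 * to 0x00000001 ^ 0x00000002 == 0x00000003, i.e. the high and low 32-bit
 * halves are folded together with XOR rather than simply truncated, so
 * distinct 64-bit ids are less likely to collide.
 */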
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   u64 attr_valid)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	fi->attr_version = ++fc->attr_version;
	fi->i_time = attr_valid;

	inode->i_ino     = fuse_squash_ino(attr->ino);
	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid     = make_kuid(&init_user_ns, attr->uid);
	inode->i_gid     = make_kgid(&init_user_ns, attr->gid);
	inode->i_blocks  = attr->blocks;
	inode->i_atime.tv_sec   = attr->atime;
	inode->i_atime.tv_nsec  = attr->atimensec;
	/* mtime from server may be stale due to local buffered write */
	if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) {
		inode->i_mtime.tv_sec   = attr->mtime;
		inode->i_mtime.tv_nsec  = attr->mtimensec;
		inode->i_ctime.tv_sec   = attr->ctime;
		inode->i_ctime.tv_nsec  = attr->ctimensec;
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions.  This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;
}
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool is_wb = fc->writeback_cache;
	loff_t oldsize;
	struct timespec old_mtime;

	spin_lock(&fc->lock);
	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fc->lock);
		return;
	}

	old_mtime = inode->i_mtime;
	fuse_change_attributes_common(inode, attr, attr_valid);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!is_wb || !S_ISREG(inode->i_mode))
		i_size_write(inode, attr->size);
	spin_unlock(&fc->lock);

	if (!is_wb && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}
}
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode->i_mtime.tv_sec  = attr->mtime;
	inode->i_mtime.tv_nsec = attr->mtimensec;
	inode->i_ctime.tv_sec  = attr->ctime;
	inode->i_ctime.tv_nsec = attr->ctimensec;
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	}
}
int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	get_fuse_inode(inode)->nodeid = nodeid;
	return 0;
}
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

 retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr);
		unlock_new_inode(inode);
	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
		/* Inode has changed type, any I/O on the old should fail */
		make_bad_inode(inode);
		iput(inode);
		goto retry;
	}

	fi = get_fuse_inode(inode);
	spin_lock(&fc->lock);
	fi->nlookup++;
	spin_unlock(&fc->lock);
	fuse_change_attributes(inode, attr, attr_valid, attr_version);

	return inode;
}
int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		return -ENOENT;

	fuse_invalidate_attr(inode);
	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len - 1) >> PAGE_SHIFT;
	invalidate_inode_pages2_range(inode->i_mapping,
				      pg_start, pg_end);
	iput(inode);
	return 0;
}
void fuse_lock_inode(struct inode *inode)
{
	if (!get_fuse_conn(inode)->parallel_dirops)
		mutex_lock(&get_fuse_inode(inode)->mutex);
}

void fuse_unlock_inode(struct inode *inode)
{
	if (!get_fuse_conn(inode)->parallel_dirops)
		mutex_unlock(&get_fuse_inode(inode)->mutex);
}
static void fuse_umount_begin(struct super_block *sb)
{
	fuse_abort_conn(get_fuse_conn_super(sb));
}
static void fuse_send_destroy(struct fuse_conn *fc)
{
	struct fuse_req *req = fc->destroy_req;
	if (req && fc->conn_init) {
		fc->destroy_req = NULL;
		req->in.h.opcode = FUSE_DESTROY;
		__set_bit(FR_FORCE, &req->flags);
		__clear_bit(FR_BACKGROUND, &req->flags);
		fuse_request_send(fc, req);
		fuse_put_request(fc, req);
	}
}
static void fuse_bdi_destroy(struct fuse_conn *fc)
{
	if (fc->bdi_initialized)
		bdi_destroy(&fc->bdi);
}
static void fuse_put_super(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fuse_send_destroy(fc);

	mutex_lock(&fuse_mutex);
	list_del(&fc->entry);
	fuse_ctl_remove_conn(fc);
	mutex_unlock(&fuse_mutex);
	fuse_bdi_destroy(fc);

	fuse_conn_put(fc);
}
static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
	stbuf->f_type    = FUSE_SUPER_MAGIC;
	stbuf->f_bsize   = attr->bsize;
	stbuf->f_frsize  = attr->frsize;
	stbuf->f_blocks  = attr->blocks;
	stbuf->f_bfree   = attr->bfree;
	stbuf->f_bavail  = attr->bavail;
	stbuf->f_files   = attr->files;
	stbuf->f_ffree   = attr->ffree;
	stbuf->f_namelen = attr->namelen;
	/* fsid is left zero */
}
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.in.h.opcode = FUSE_STATFS;
	args.in.h.nodeid = get_node_id(d_inode(dentry));
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}
enum {
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};

static const match_table_t tokens = {
	{OPT_FD,			"fd=%u"},
	{OPT_ROOTMODE,			"rootmode=%o"},
	{OPT_USER_ID,			"user_id=%u"},
	{OPT_GROUP_ID,			"group_id=%u"},
	{OPT_DEFAULT_PERMISSIONS,	"default_permissions"},
	{OPT_ALLOW_OTHER,		"allow_other"},
	{OPT_MAX_READ,			"max_read=%u"},
	{OPT_BLKSIZE,			"blksize=%u"},
	{OPT_ERR,			NULL}
};
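/*
 * Illustrative example (not part of the original source): the option
 * string handed to the kernel by a userspace mount helper typically
 * looks like
 *
 *	fd=4,rootmode=40000,user_id=1000,group_id=1000,default_permissions
 *
 * parse_fuse_opt() below walks it with strsep()/match_token() and fails
 * the mount unless fd, rootmode, user_id and group_id are all present.
 */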
static int fuse_match_uint(substring_t *s, unsigned int *res)
{
	int err = -ENOMEM;
	char *buf = match_strdup(s);
	if (buf) {
		err = kstrtouint(buf, 10, res);
		kfree(buf);
	}
	return err;
}
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
{
	char *p;

	memset(d, 0, sizeof(struct fuse_mount_data));
	d->max_read = ~0;
	d->blksize = FUSE_DEFAULT_BLKSIZE;

	while ((p = strsep(&opt, ",")) != NULL) {
		int token;
		int value;
		unsigned uv;
		substring_t args[MAX_OPT_ARGS];
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case OPT_FD:
			if (match_int(&args[0], &value))
				return 0;
			d->fd = value;
			d->fd_present = 1;
			break;

		case OPT_ROOTMODE:
			if (match_octal(&args[0], &value))
				return 0;
			if (!fuse_valid_type(value))
				return 0;
			d->rootmode = value;
			d->rootmode_present = 1;
			break;

		case OPT_USER_ID:
			if (fuse_match_uint(&args[0], &uv))
				return 0;
			d->user_id = make_kuid(current_user_ns(), uv);
			if (!uid_valid(d->user_id))
				return 0;
			d->user_id_present = 1;
			break;

		case OPT_GROUP_ID:
			if (fuse_match_uint(&args[0], &uv))
				return 0;
			d->group_id = make_kgid(current_user_ns(), uv);
			if (!gid_valid(d->group_id))
				return 0;
			d->group_id_present = 1;
			break;

		case OPT_DEFAULT_PERMISSIONS:
			d->flags |= FUSE_DEFAULT_PERMISSIONS;
			break;

		case OPT_ALLOW_OTHER:
			d->flags |= FUSE_ALLOW_OTHER;
			break;

		case OPT_MAX_READ:
			if (match_int(&args[0], &value))
				return 0;
			d->max_read = value;
			break;

		case OPT_BLKSIZE:
			if (!is_bdev || match_int(&args[0], &value))
				return 0;
			d->blksize = value;
			break;

		default:
			return 0;
		}
	}

	if (!d->fd_present || !d->rootmode_present ||
	    !d->user_id_present || !d->group_id_present)
		return 0;

	return 1;
}
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	seq_printf(m, ",user_id=%u", from_kuid_munged(&init_user_ns, fc->user_id));
	seq_printf(m, ",group_id=%u", from_kgid_munged(&init_user_ns, fc->group_id));
	if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
		seq_puts(m, ",default_permissions");
	if (fc->flags & FUSE_ALLOW_OTHER)
		seq_puts(m, ",allow_other");
	if (fc->max_read != ~0)
		seq_printf(m, ",max_read=%u", fc->max_read);
	if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
		seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	return 0;
}
static void fuse_iqueue_init(struct fuse_iqueue *fiq)
{
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	fiq->forget_list_tail = &fiq->forget_list_head;
}

static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
	memset(fpq, 0, sizeof(struct fuse_pqueue));
	spin_lock_init(&fpq->lock);
	INIT_LIST_HEAD(&fpq->processing);
	INIT_LIST_HEAD(&fpq->io);
}

void fuse_conn_init(struct fuse_conn *fc)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	init_rwsem(&fc->killsb);
	atomic_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	init_waitqueue_head(&fc->reserved_req_waitq);
	fuse_iqueue_init(&fc->iq);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	fc->polled_files = RB_ROOT;
	fc->attr_version = 1;
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
void fuse_conn_put(struct fuse_conn *fc)
{
	if (atomic_dec_and_test(&fc->count)) {
		if (fc->destroy_req)
			fuse_request_free(fc->destroy_req);
		fc->release(fc);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);

struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	atomic_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
	struct fuse_attr attr;
	memset(&attr, 0, sizeof(attr));

	attr.mode = mode;
	attr.ino = FUSE_ROOT_ID;
	return fuse_iget(sb, 1, 0, &attr, 0, 0);
}

struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};

static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? 0x82 : 0x81;
}
*fuse_fh_to_dentry(struct super_block
*sb
,
742 struct fid
*fid
, int fh_len
, int fh_type
)
744 struct fuse_inode_handle handle
;
746 if ((fh_type
!= 0x81 && fh_type
!= 0x82) || fh_len
< 3)
749 handle
.nodeid
= (u64
) fid
->raw
[0] << 32;
750 handle
.nodeid
|= (u64
) fid
->raw
[1];
751 handle
.generation
= fid
->raw
[2];
752 return fuse_get_dentry(sb
, &handle
);
755 static struct dentry
*fuse_fh_to_parent(struct super_block
*sb
,
756 struct fid
*fid
, int fh_len
, int fh_type
)
758 struct fuse_inode_handle parent
;
760 if (fh_type
!= 0x82 || fh_len
< 6)
763 parent
.nodeid
= (u64
) fid
->raw
[3] << 32;
764 parent
.nodeid
|= (u64
) fid
->raw
[4];
765 parent
.generation
= fid
->raw
[5];
766 return fuse_get_dentry(sb
, &parent
);
static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	const struct qstr name = QSTR_INIT("..", 2);
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}
static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};
static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.destroy_inode	= fuse_destroy_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	.drop_inode	= generic_delete_inode,
	.remount_fs	= fuse_remount_fs,
	.put_super	= fuse_put_super,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.show_options	= fuse_show_options,
};
static void sanitize_global_limit(unsigned *limit)
{
	if (*limit == 0)
		*limit = ((totalram_pages << PAGE_SHIFT) >> 13) /
			 sizeof(struct fuse_req);

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}
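/*
 * Illustrative arithmetic (not in the original source): the default limit
 * budgets roughly 1/8192 of physical memory for request structures.  With
 * 1 GiB of RAM that is 2^30 >> 13 = 128 KiB; divided by sizeof(struct
 * fuse_req) (a few hundred bytes) this yields a limit of a few hundred
 * backgrounded requests, capped above at 2^16 - 1.
 */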
static int set_global_limit(const char *val, struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned *)kp->arg);

	return 0;
}
static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
}
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_init_out *arg = &req->misc.init_out;

	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION) {
		fc->conn_error = 1;
	} else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (arg->flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(arg->flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(arg->flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				if (!(arg->flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (arg->flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (arg->flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (arg->flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (arg->flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (arg->flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			if (arg->flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (arg->flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (arg->flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (arg->flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (arg->flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fc->sb->s_time_gran = arg->time_gran;
		} else {
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
}
static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_init_in *arg = &req->misc.init_in;

	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS;
	req->in.h.opcode = FUSE_INIT;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
	req->out.numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	req->out.argvar = 1;
	req->out.args[0].size = sizeof(struct fuse_init_out);
	req->out.args[0].value = &req->misc.init_out;
	req->end = process_init_reply;
	fuse_request_send_background(fc, req);
}
*fc
)
962 WARN_ON(!list_empty(&fc
->devices
));
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;

	fc->bdi.name = "fuse";
	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	/* fuse does its own writeback accounting */
	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;

	err = bdi_init(&fc->bdi);
	if (err)
		return err;

	fc->bdi_initialized = 1;

	if (sb->s_bdev)
		err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
				   MAJOR(fc->dev), MINOR(fc->dev));
	else
		err = bdi_register_dev(&fc->bdi, fc->dev);
	if (err)
		return err;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 * /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(&fc->bdi, 1);

	return 0;
}
struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
{
	struct fuse_dev *fud;

	fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
	if (fud) {
		fud->fc = fuse_conn_get(fc);
		fuse_pqueue_init(&fud->pq);

		spin_lock(&fc->lock);
		list_add_tail(&fud->entry, &fc->devices);
		spin_unlock(&fc->lock);
	}

	return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);

void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct inode *root;
	struct fuse_mount_data d;
	struct file *file;
	struct dentry *root_dentry;
	struct fuse_req *init_req;
	int err;
	int is_bdev = sb->s_bdev != NULL;

	err = -EINVAL;
	if (sb->s_flags & MS_MANDLOCK)
		goto err;

	sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);

	if (!parse_fuse_opt(data, &d, is_bdev))
		goto err;

	if (is_bdev) {
		if (!sb_set_blocksize(sb, d.blksize))
			goto err;
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;

	file = fget(d.fd);
	if (!file)
		goto err;

	if ((file->f_op != &fuse_dev_operations) ||
	    (file->f_cred->user_ns != &init_user_ns))
		goto err_fput;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	err = -ENOMEM;
	if (!fc)
		goto err_fput;

	fuse_conn_init(fc);
	fc->release = fuse_free_conn;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		goto err_put_conn;

	fc->dev = sb->s_dev;
	fc->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	sb->s_bdi = &fc->bdi;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & MS_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= MS_POSIXACL;

	fc->flags = d.flags;
	fc->user_id = d.user_id;
	fc->group_id = d.group_id;
	fc->max_read = max_t(unsigned, 4096, d.max_read);

	/* Used by get_root_inode() */
	sb->s_fs_info = fc;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, d.rootmode);
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* only now - we want root dentry with NULL ->d_op */
	sb->s_d_op = &fuse_dentry_operations;

	init_req = fuse_request_alloc(0);
	if (!init_req)
		goto err_put_root;
	__set_bit(FR_BACKGROUND, &init_req->flags);

	if (is_bdev) {
		fc->destroy_req = fuse_request_alloc(0);
		if (!fc->destroy_req)
			goto err_free_init_req;
	}

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (file->private_data)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	file->private_data = fud;
	mutex_unlock(&fuse_mutex);
	/*
	 * atomic_dec_and_test() in fput() provides the necessary
	 * memory barrier for file->private_data to be visible on all
	 * CPUs after this
	 */
	fput(file);

	fuse_send_init(fc, init_req);

	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
 err_free_init_req:
	fuse_request_free(init_req);
 err_put_root:
	dput(root_dentry);
 err_dev_free:
	fuse_dev_free(fud);
 err_put_conn:
	fuse_bdi_destroy(fc);
	fuse_conn_put(fc);
 err_fput:
	fput(file);
 err:
	return err;
}
*fuse_mount(struct file_system_type
*fs_type
,
1181 int flags
, const char *dev_name
,
1184 return mount_nodev(fs_type
, flags
, raw_data
, fuse_fill_super
);
static void fuse_kill_sb_anon(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc) {
		down_write(&fc->killsb);
		fc->sb = NULL;
		up_write(&fc->killsb);
	}

	kill_anon_super(sb);
}
static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE,
	.mount		= fuse_mount,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
				     int flags, const char *dev_name,
				     void *raw_data)
{
	return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super);
}
static void fuse_kill_sb_blk(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc) {
		down_write(&fc->killsb);
		fc->sb = NULL;
		up_write(&fc->killsb);
	}

	kill_block_super(sb);
}
static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.mount		= fuse_mount_blk,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
MODULE_ALIAS_FS("fuseblk");
#ifdef CONFIG_BLOCK
static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
static void fuse_inode_init_once(void *foo)
{
	struct inode *inode = foo;

	inode_init_once(inode);
}
static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
					      sizeof(struct fuse_inode), 0,
					      SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
					      fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	return 0;

 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}
static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}
static struct kobject *fuse_kobj;

static int fuse_sysfs_init(void)
{
	int err;

	fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
	if (!fuse_kobj) {
		err = -ENOMEM;
		goto out_err;
	}

	err = sysfs_create_mount_point(fuse_kobj, "connections");
	if (err)
		goto out_fuse_unregister;

	return 0;

 out_fuse_unregister:
	kobject_put(fuse_kobj);
 out_err:
	return err;
}

static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}
static int __init fuse_init(void)
{
	int res;

	printk(KERN_INFO "fuse init (API version %i.%i)\n",
	       FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}
static void __exit fuse_exit(void)
{
	printk(KERN_DEBUG "fuse exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);