/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
enum {
        Opt_gc_background, Opt_disable_roll_forward, Opt_discard, Opt_noheap,
        Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_active_logs,
        Opt_disable_ext_identify, Opt_inline_xattr, Opt_inline_data, Opt_err,
};
static match_table_t f2fs_tokens = {
        {Opt_gc_background, "background_gc=%s"},
        {Opt_disable_roll_forward, "disable_roll_forward"},
        {Opt_discard, "discard"},
        {Opt_noheap, "no_heap"},
        {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
        {Opt_inline_xattr, "inline_xattr"},
        {Opt_inline_data, "inline_data"},
        {Opt_err, NULL},
};
/* Sysfs support for f2fs */
enum {
        GC_THREAD,      /* struct f2fs_gc_thread */
        SM_INFO,        /* struct f2fs_sm_info */
        F2FS_SBI,       /* struct f2fs_sb_info */
};
struct f2fs_attr {
        struct attribute attr;
        ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
        ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
                         const char *, size_t);
        int struct_type;
        int offset;
};
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
        if (struct_type == GC_THREAD)
                return (unsigned char *)sbi->gc_thread;
        else if (struct_type == SM_INFO)
                return (unsigned char *)SM_I(sbi);
        else if (struct_type == F2FS_SBI)
                return (unsigned char *)sbi;
        return NULL;
}
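/*
 * Note: __struct_ptr() maps an attribute's struct_type tag to the base
 * address of the in-memory object it tunes; f2fs_sbi_show()/f2fs_sbi_store()
 * below then add attr->offset to reach the exported unsigned int field.
 */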
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
                        struct f2fs_sb_info *sbi, char *buf)
{
        unsigned char *ptr = NULL;
        unsigned int *ui;

        ptr = __struct_ptr(sbi, a->struct_type);
        if (!ptr)
                return -EINVAL;

        ui = (unsigned int *)(ptr + a->offset);

        return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
                        struct f2fs_sb_info *sbi,
                        const char *buf, size_t count)
{
        unsigned char *ptr;
        unsigned long t;
        unsigned int *ui;
        ssize_t ret;

        ptr = __struct_ptr(sbi, a->struct_type);
        if (!ptr)
                return -EINVAL;

        ui = (unsigned int *)(ptr + a->offset);

        ret = kstrtoul(skip_spaces(buf), 0, &t);
        if (ret < 0)
                return ret;
        *ui = t;
        return count;
}
static ssize_t f2fs_attr_show(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
                                                        s_kobj);
        struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

        return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
                                                const char *buf, size_t len)
{
        struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
                                                        s_kobj);
        struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

        return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release(struct kobject *kobj)
{
        struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
                                                        s_kobj);
        complete(&sbi->s_kobj_unregister);
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {                   \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
        .show   = _show,                                         \
        .store  = _store,                                        \
        .struct_type = _struct_type,                             \
        .offset = _offset                                        \
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)    \
        F2FS_ATTR_OFFSET(struct_type, name, 0644,               \
                f2fs_sbi_show, f2fs_sbi_store,                   \
                offsetof(struct struct_name, elname))
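/*
 * Example: F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time,
 * min_sleep_time) declares a 0644 attribute backed by the min_sleep_time
 * field of struct f2fs_gc_kthread; once the per-superblock kobject is
 * registered in f2fs_fill_super(), it shows up as a file such as
 * /sys/fs/f2fs/<dev>/gc_min_sleep_time.
 */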
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
        ATTR_LIST(gc_min_sleep_time),
        ATTR_LIST(gc_max_sleep_time),
        ATTR_LIST(gc_no_gc_sleep_time),
        ATTR_LIST(gc_idle),
        ATTR_LIST(reclaim_segments),
        ATTR_LIST(max_small_discards),
        ATTR_LIST(ipu_policy),
        ATTR_LIST(min_ipu_util),
        ATTR_LIST(max_victim_search),
        NULL,
};
static const struct sysfs_ops f2fs_attr_ops = {
        .show   = f2fs_attr_show,
        .store  = f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
        .default_attrs  = f2fs_attrs,
        .sysfs_ops      = &f2fs_attr_ops,
        .release        = f2fs_sb_release,
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
        va_end(args);
}
static void init_once(void *foo)
{
        struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

        inode_init_once(&fi->vfs_inode);
}
static int parse_options(struct super_block *sb, char *options)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        substring_t args[MAX_OPT_ARGS];
        char *p, *name;
        int arg = 0;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;
                /*
                 * Initialize args struct so we know whether arg was
                 * found; some options take optional arguments.
                 */
                args[0].to = args[0].from = NULL;
                token = match_token(p, f2fs_tokens, args);

                switch (token) {
                case Opt_gc_background:
                        name = match_strdup(&args[0]);

                        if (!name)
                                return -ENOMEM;
                        if (!strncmp(name, "on", 2))
                                set_opt(sbi, BG_GC);
                        else if (!strncmp(name, "off", 3))
                                clear_opt(sbi, BG_GC);
                        else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_disable_roll_forward:
                        set_opt(sbi, DISABLE_ROLL_FORWARD);
                        break;
                case Opt_discard:
                        set_opt(sbi, DISCARD);
                        break;
                case Opt_noheap:
                        set_opt(sbi, NOHEAP);
                        break;
#ifdef CONFIG_F2FS_FS_XATTR
                case Opt_user_xattr:
                        set_opt(sbi, XATTR_USER);
                        break;
                case Opt_nouser_xattr:
                        clear_opt(sbi, XATTR_USER);
                        break;
                case Opt_inline_xattr:
                        set_opt(sbi, INLINE_XATTR);
                        break;
#else
                case Opt_user_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "user_xattr options not supported");
                        break;
                case Opt_nouser_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "nouser_xattr options not supported");
                        break;
                case Opt_inline_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "inline_xattr options not supported");
                        break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
                case Opt_acl:
                        set_opt(sbi, POSIX_ACL);
                        break;
                case Opt_noacl:
                        clear_opt(sbi, POSIX_ACL);
                        break;
#else
                case Opt_acl:
                        f2fs_msg(sb, KERN_INFO, "acl options not supported");
                        break;
                case Opt_noacl:
                        f2fs_msg(sb, KERN_INFO, "noacl options not supported");
                        break;
#endif
                case Opt_active_logs:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
                                return -EINVAL;
                        sbi->active_logs = arg;
                        break;
                case Opt_disable_ext_identify:
                        set_opt(sbi, DISABLE_EXT_IDENTIFY);
                        break;
                case Opt_inline_data:
                        set_opt(sbi, INLINE_DATA);
                        break;
                default:
                        f2fs_msg(sb, KERN_ERR,
                                "Unrecognized mount option \"%s\" or missing value",
                                p);
                        return -EINVAL;
                }
        }
        return 0;
}
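/*
 * Illustrative example: mounting with
 *   -o background_gc=off,discard,active_logs=4
 * clears BG_GC, sets DISCARD and stores active_logs = 4 in the superblock
 * info; an unrecognized option makes parse_options() fail the mount with
 * -EINVAL.
 */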
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
        struct f2fs_inode_info *fi;

        fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
        if (!fi)
                return NULL;

        init_once((void *) fi);

        /* Initialize f2fs-specific inode info */
        fi->vfs_inode.i_version = 1;
        atomic_set(&fi->dirty_dents, 0);
        fi->i_current_depth = 1;
        rwlock_init(&fi->ext.ext_lock);

        set_inode_flag(fi, FI_NEW_INODE);

        if (test_opt(F2FS_SB(sb), INLINE_XATTR))
                set_inode_flag(fi, FI_INLINE_XATTR);

        return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
        /*
         * This is to avoid a deadlock condition like below.
         * writeback_single_inode(inode)
         *  - f2fs_write_data_page
         *    - f2fs_gc -> iput -> evict
         *       - inode_wait_for_writeback(inode)
         */
        if (!inode_unhashed(inode) && inode->i_state & I_SYNC)
                return 0;
        return generic_drop_inode(inode);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
        set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
static void f2fs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void f2fs_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, f2fs_i_callback);
}
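/*
 * Freeing the inode is deferred through call_rcu() so that lockless (RCU)
 * path walkers still dereferencing it see valid memory until a grace period
 * has elapsed; the actual kmem_cache_free() runs in f2fs_i_callback().
 */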
static void f2fs_put_super(struct super_block *sb)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);

        if (sbi->s_proc) {
                remove_proc_entry("segment_info", sbi->s_proc);
                remove_proc_entry(sb->s_id, f2fs_proc_root);
        }
        kobject_del(&sbi->s_kobj);

        f2fs_destroy_stats(sbi);
        stop_gc_thread(sbi);

        /* We don't need to do checkpoint when it's clean */
        if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
                write_checkpoint(sbi, true);

        iput(sbi->node_inode);
        iput(sbi->meta_inode);

        /* destroy f2fs internal modules */
        destroy_node_manager(sbi);
        destroy_segment_manager(sbi);

        kfree(sbi->ckpt);
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);

        sb->s_fs_info = NULL;
        brelse(sbi->raw_super_buf);
        kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);

        trace_f2fs_sync_fs(sb, sync);

        if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
                return 0;

        if (sync) {
                mutex_lock(&sbi->gc_mutex);
                write_checkpoint(sbi, false);
                mutex_unlock(&sbi->gc_mutex);
        } else {
                f2fs_balance_fs(sbi);
        }

        return 0;
}
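/*
 * Note: with sync != 0 a full checkpoint is written under gc_mutex; a
 * non-sync call only invokes f2fs_balance_fs() so cleaning can catch up
 * instead of forcing a checkpoint.
 */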
static int f2fs_freeze(struct super_block *sb)
{
        int err;

        if (f2fs_readonly(sb))
                return 0;

        err = f2fs_sync_fs(sb, 1);
        return err;
}
static int f2fs_unfreeze(struct super_block *sb)
{
        return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
        block_t total_count, user_block_count, start_count, ovp_count;

        total_count = le64_to_cpu(sbi->raw_super->block_count);
        user_block_count = sbi->user_block_count;
        start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
        ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
        buf->f_type = F2FS_SUPER_MAGIC;
        buf->f_bsize = sbi->blocksize;

        buf->f_blocks = total_count - start_count;
        buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
        buf->f_bavail = user_block_count - valid_user_blocks(sbi);

        buf->f_files = sbi->total_node_count;
        buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);

        buf->f_namelen = F2FS_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);

        return 0;
}
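/*
 * f_bfree excludes the overprovision segments kept back for cleaning, while
 * f_bavail is what user data may still consume: user_block_count minus the
 * blocks already validly in use.
 */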
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
        struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

        if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC))
                seq_printf(seq, ",background_gc=%s", "on");
        else
                seq_printf(seq, ",background_gc=%s", "off");
        if (test_opt(sbi, DISABLE_ROLL_FORWARD))
                seq_puts(seq, ",disable_roll_forward");
        if (test_opt(sbi, DISCARD))
                seq_puts(seq, ",discard");
        if (test_opt(sbi, NOHEAP))
                seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
        if (test_opt(sbi, XATTR_USER))
                seq_puts(seq, ",user_xattr");
        else
                seq_puts(seq, ",nouser_xattr");
        if (test_opt(sbi, INLINE_XATTR))
                seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
        if (test_opt(sbi, POSIX_ACL))
                seq_puts(seq, ",acl");
        else
                seq_puts(seq, ",noacl");
#endif
        if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
                seq_puts(seq, ",disable_ext_identify");
        if (test_opt(sbi, INLINE_DATA))
                seq_puts(seq, ",inline_data");
        seq_printf(seq, ",active_logs=%u", sbi->active_logs);

        return 0;
}
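/*
 * Illustrative /proc/mounts output for a default rw mount:
 * "...,background_gc=on,user_xattr,acl,active_logs=6"; the exact set depends
 * on the Kconfig options and mount flags tested above.
 */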
static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
        struct super_block *sb = seq->private;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        unsigned int total_segs =
                        le32_to_cpu(sbi->raw_super->segment_count_main);
        int i;

        for (i = 0; i < total_segs; i++) {
                seq_printf(seq, "%u", get_valid_blocks(sbi, i, 1));
                if (i != 0 && (i % 10) == 0)
                        seq_puts(seq, "\n");
                else
                        seq_puts(seq, " ");
        }
        return 0;
}

static int segment_info_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, segment_info_seq_show, PDE_DATA(inode));
}
static const struct file_operations f2fs_seq_segment_info_fops = {
        .owner = THIS_MODULE,
        .open = segment_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
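/*
 * These fops back /proc/fs/f2fs/<dev>/segment_info (created in
 * f2fs_fill_super() below); reading it dumps the valid-block count of every
 * main-area segment, roughly ten values per line.
 */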
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct f2fs_mount_info org_mount_opt;
        int err, active_logs;

        /*
         * Save the old mount options in case we
         * need to restore them.
         */
        org_mount_opt = sbi->mount_opt;
        active_logs = sbi->active_logs;

        /* parse mount options */
        err = parse_options(sb, data);
        if (err)
                goto restore_opts;

        /*
         * Previous and new state of filesystem is RO,
         * so no point in checking GC conditions.
         */
        if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
                goto skip;

        /*
         * We stop the GC thread if FS is mounted as RO
         * or if background_gc = off is passed in mount
         * option. Also sync the filesystem.
         */
        if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
                if (sbi->gc_thread) {
                        stop_gc_thread(sbi);
                        f2fs_sync_fs(sb, 1);
                }
        } else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) {
                err = start_gc_thread(sbi);
                if (err)
                        goto restore_opts;
        }
skip:
        /* Update the POSIXACL Flag */
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
        return 0;

restore_opts:
        sbi->mount_opt = org_mount_opt;
        sbi->active_logs = active_logs;
        return err;
}
static struct super_operations f2fs_sops = {
        .alloc_inode    = f2fs_alloc_inode,
        .drop_inode     = f2fs_drop_inode,
        .destroy_inode  = f2fs_destroy_inode,
        .write_inode    = f2fs_write_inode,
        .dirty_inode    = f2fs_dirty_inode,
        .show_options   = f2fs_show_options,
        .evict_inode    = f2fs_evict_inode,
        .put_super      = f2fs_put_super,
        .sync_fs        = f2fs_sync_fs,
        .freeze_fs      = f2fs_freeze,
        .unfreeze_fs    = f2fs_unfreeze,
        .statfs         = f2fs_statfs,
        .remount_fs     = f2fs_remount,
};
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
                u64 ino, u32 generation)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;

        if (unlikely(ino < F2FS_ROOT_INO(sbi)))
                return ERR_PTR(-ESTALE);

        /*
         * f2fs_iget isn't quite right if the inode is currently unallocated!
         * However f2fs_iget currently does appropriate checks to handle stale
         * inodes so everything is OK.
         */
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (unlikely(generation && inode->i_generation != generation)) {
                /* we didn't find the right inode.. */
                iput(inode);
                return ERR_PTR(-ESTALE);
        }
        return inode;
}
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
                int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                        f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
                int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                        f2fs_nfs_get_inode);
}
static const struct export_operations f2fs_export_ops = {
        .fh_to_dentry = f2fs_fh_to_dentry,
        .fh_to_parent = f2fs_fh_to_parent,
        .get_parent = f2fs_get_parent,
};
static loff_t max_file_size(unsigned bits)
{
        loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
        loff_t leaf_count = ADDRS_PER_BLOCK;

        /* two direct node blocks */
        result += (leaf_count * 2);

        /* two indirect node blocks */
        leaf_count *= NIDS_PER_BLOCK;
        result += (leaf_count * 2);

        /* one double indirect node block */
        leaf_count *= NIDS_PER_BLOCK;
        result += leaf_count;

        result <<= bits;
        return result;
}
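/*
 * Worked out, in blocks:
 *   (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS)
 *   + 2 * ADDRS_PER_BLOCK                          (direct node blocks)
 *   + 2 * NIDS_PER_BLOCK * ADDRS_PER_BLOCK         (indirect node blocks)
 *   + NIDS_PER_BLOCK^2 * ADDRS_PER_BLOCK           (double indirect block)
 * The final shift by the block-size bits turns this block count into the
 * byte limit used for sb->s_maxbytes.
 */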
static int sanity_check_raw_super(struct super_block *sb,
                        struct f2fs_super_block *raw_super)
{
        unsigned int blocksize;

        if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
                f2fs_msg(sb, KERN_INFO,
                        "Magic Mismatch, valid(0x%x) - read(0x%x)",
                        F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
                return 1;
        }

        /* Currently, support only 4KB page cache size */
        if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid page_cache_size (%lu), supports only 4KB\n",
                        PAGE_CACHE_SIZE);
                return 1;
        }

        /* Currently, support only 4KB block size */
        blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
        if (blocksize != F2FS_BLKSIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid blocksize (%u), supports only 4KB\n",
                        blocksize);
                return 1;
        }

        if (le32_to_cpu(raw_super->log_sectorsize) !=
                                        F2FS_LOG_SECTOR_SIZE) {
                f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
                return 1;
        }
        if (le32_to_cpu(raw_super->log_sectors_per_block) !=
                                        F2FS_LOG_SECTORS_PER_BLOCK) {
                f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
                return 1;
        }
        return 0;
}
static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
        unsigned int total, fsmeta;
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

        total = le32_to_cpu(raw_super->segment_count);
        fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
        fsmeta += le32_to_cpu(raw_super->segment_count_sit);
        fsmeta += le32_to_cpu(raw_super->segment_count_nat);
        fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
        fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

        if (unlikely(fsmeta >= total))
                return 1;

        if (unlikely(is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) {
                f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
                return 1;
        }
        return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *raw_super = sbi->raw_super;
        int i;

        sbi->log_sectors_per_block =
                le32_to_cpu(raw_super->log_sectors_per_block);
        sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
        sbi->blocksize = 1 << sbi->log_blocksize;
        sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
        sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
        sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
        sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
        sbi->total_sections = le32_to_cpu(raw_super->section_count);
        sbi->total_node_count =
                (le32_to_cpu(raw_super->segment_count_nat) / 2)
                        * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
        sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
        sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
        sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
        sbi->cur_victim_sec = NULL_SECNO;
        sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

        for (i = 0; i < NR_COUNT_TYPE; i++)
                atomic_set(&sbi->nr_pages[i], 0);
}
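/*
 * Most fields above are plain endian conversions of the on-disk geometry;
 * total_node_count derives from the NAT segment count (halved, since the NAT
 * area appears to hold two copies of each block) times NAT_ENTRY_PER_BLOCK
 * entries per block.
 */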
/*
 * Read f2fs raw super block.
 * Because we have two copies of super block, so read the first one at first,
 * if the first one is invalid, move to read the second one.
 */
static int read_raw_super_block(struct super_block *sb,
                        struct f2fs_super_block **raw_super,
                        struct buffer_head **raw_super_buf)
{
        int block = 0;

retry:
        *raw_super_buf = sb_bread(sb, block);
        if (!*raw_super_buf) {
                f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
                                block + 1);
                if (block == 0) {
                        block++;
                        goto retry;
                } else {
                        return -EIO;
                }
        }

        *raw_super = (struct f2fs_super_block *)
                ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);

        /* sanity checking of raw super */
        if (sanity_check_raw_super(sb, *raw_super)) {
                brelse(*raw_super_buf);
                f2fs_msg(sb, KERN_ERR,
                        "Can't find valid F2FS filesystem in %dth superblock",
                                block + 1);
                if (block == 0) {
                        block++;
                        goto retry;
                } else {
                        return -EIO;
                }
        }

        return 0;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct f2fs_sb_info *sbi;
        struct f2fs_super_block *raw_super;
        struct buffer_head *raw_super_buf;
        struct inode *root;
        long err = -EINVAL;
        int i;

        /* allocate memory for f2fs-specific super block info */
        sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        /* set a block size */
        if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
                f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
                goto free_sbi;
        }

        err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
        if (err)
                goto free_sbi;

        sb->s_fs_info = sbi;
        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;

        set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
        set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
        set_opt(sbi, POSIX_ACL);
#endif
        /* parse mount options */
        err = parse_options(sb, (char *)data);
        if (err)
                goto free_sb_buf;

        sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
        sb->s_max_links = F2FS_LINK_MAX;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));

        sb->s_op = &f2fs_sops;
        sb->s_xattr = f2fs_xattr_handlers;
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
        memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

        /* init f2fs-specific super block info */
        sbi->sb = sb;
        sbi->raw_super = raw_super;
        sbi->raw_super_buf = raw_super_buf;
        mutex_init(&sbi->gc_mutex);
        mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
        mutex_init(&sbi->node_write);
        sbi->por_doing = false;
        spin_lock_init(&sbi->stat_lock);

        mutex_init(&sbi->read_io.io_mutex);
        sbi->read_io.sbi = sbi;
        sbi->read_io.bio = NULL;
        for (i = 0; i < NR_PAGE_TYPE; i++) {
                mutex_init(&sbi->write_io[i].io_mutex);
                sbi->write_io[i].sbi = sbi;
                sbi->write_io[i].bio = NULL;
        }

        init_rwsem(&sbi->cp_rwsem);
        init_waitqueue_head(&sbi->cp_wait);
        init_sb_info(sbi);
        /* get an inode for meta space */
        sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
        if (IS_ERR(sbi->meta_inode)) {
                f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
                err = PTR_ERR(sbi->meta_inode);
                goto free_sb_buf;
        }

        err = get_valid_checkpoint(sbi);
        if (err) {
                f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
                goto free_meta_inode;
        }

        /* sanity checking of checkpoint */
        err = -EINVAL;
        if (sanity_check_ckpt(sbi)) {
                f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
                goto free_cp;
        }

        sbi->total_valid_node_count =
                                le32_to_cpu(sbi->ckpt->valid_node_count);
        sbi->total_valid_inode_count =
                                le32_to_cpu(sbi->ckpt->valid_inode_count);
        sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
        sbi->total_valid_block_count =
                                le64_to_cpu(sbi->ckpt->valid_block_count);
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;
        INIT_LIST_HEAD(&sbi->dir_inode_list);
        spin_lock_init(&sbi->dir_inode_lock);

        init_orphan_info(sbi);
        /* setup f2fs internal modules */
        err = build_segment_manager(sbi);
        if (err) {
                f2fs_msg(sb, KERN_ERR,
                        "Failed to initialize F2FS segment manager");
                goto free_sm;
        }
        err = build_node_manager(sbi);
        if (err) {
                f2fs_msg(sb, KERN_ERR,
                        "Failed to initialize F2FS node manager");
                goto free_nm;
        }

        build_gc_manager(sbi);

        /* get an inode for node space */
        sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
        if (IS_ERR(sbi->node_inode)) {
                f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
                err = PTR_ERR(sbi->node_inode);
                goto free_nm;
        }

        /* if there are nt orphan nodes free them */
        recover_orphan_inodes(sbi);

        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
        if (IS_ERR(root)) {
                f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
                err = PTR_ERR(root);
                goto free_node_inode;
        }
        if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
                err = -EINVAL;
                goto free_root_inode;
        }

        sb->s_root = d_make_root(root); /* allocate root dentry */
        if (!sb->s_root) {
                err = -ENOMEM;
                goto free_root_inode;
        }

        /* recover fsynced data */
        if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
                err = recover_fsync_data(sbi);
                if (err)
                        f2fs_msg(sb, KERN_ERR,
                                "Cannot recover all fsync data errno=%ld", err);
        }

        /*
         * If filesystem is not mounted as read-only then
         * do start the gc_thread.
         */
        if (!(sb->s_flags & MS_RDONLY)) {
                /* After POR, we can run background GC thread.*/
                err = start_gc_thread(sbi);
                if (err)
                        goto free_gc;
        }

        err = f2fs_build_stats(sbi);
        if (err)
                goto free_gc;
        if (f2fs_proc_root)
                sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

        if (sbi->s_proc)
                proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
                                 &f2fs_seq_segment_info_fops, sb);

        if (test_opt(sbi, DISCARD)) {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                if (!blk_queue_discard(q))
                        f2fs_msg(sb, KERN_WARNING,
                                        "mounting with \"discard\" option, but "
                                        "the device does not support discard");
        }

        sbi->s_kobj.kset = f2fs_kset;
        init_completion(&sbi->s_kobj_unregister);
        err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
                                                        "%s", sb->s_id);
        if (err)
                goto fail;

        return 0;

fail:
        if (sbi->s_proc) {
                remove_proc_entry("segment_info", sbi->s_proc);
                remove_proc_entry(sb->s_id, f2fs_proc_root);
        }
        f2fs_destroy_stats(sbi);
free_gc:
        stop_gc_thread(sbi);
free_root_inode:
        dput(sb->s_root);
        sb->s_root = NULL;
free_node_inode:
        iput(sbi->node_inode);
free_nm:
        destroy_node_manager(sbi);
free_sm:
        destroy_segment_manager(sbi);
free_cp:
        kfree(sbi->ckpt);
free_meta_inode:
        make_bad_inode(sbi->meta_inode);
        iput(sbi->meta_inode);
free_sb_buf:
        brelse(raw_super_buf);
free_sbi:
        kfree(sbi);
        return err;
}
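/*
 * Mount flow in short: f2fs_fill_super() reads and sanity-checks the raw
 * super block, parses mount options, builds the segment, node and GC
 * managers, loads the meta, node and root inodes, optionally rolls forward
 * fsynced data, starts the background GC thread for rw mounts, and finally
 * registers the per-super proc entries and sysfs kobject.
 */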
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static struct file_system_type f2fs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "f2fs",
        .mount          = f2fs_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
        f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
                        sizeof(struct f2fs_inode_info), NULL);
        if (!f2fs_inode_cachep)
                return -ENOMEM;
        return 0;
}
static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
        int err;

        err = init_inodecache();
        if (err)
                goto fail;
        err = create_node_manager_caches();
        if (err)
                goto free_inodecache;
        err = create_segment_manager_caches();
        if (err)
                goto free_node_manager_caches;
        err = create_gc_caches();
        if (err)
                goto free_segment_manager_caches;
        err = create_checkpoint_caches();
        if (err)
                goto free_gc_caches;
        f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
        if (!f2fs_kset) {
                err = -ENOMEM;
                goto free_checkpoint_caches;
        }
        err = register_filesystem(&f2fs_fs_type);
        if (err)
                goto free_kset;
        f2fs_create_root_stats();
        f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
        return 0;

free_kset:
        kset_unregister(f2fs_kset);
free_checkpoint_caches:
        destroy_checkpoint_caches();
free_gc_caches:
        destroy_gc_caches();
free_segment_manager_caches:
        destroy_segment_manager_caches();
free_node_manager_caches:
        destroy_node_manager_caches();
free_inodecache:
        destroy_inodecache();
fail:
        return err;
}
static void __exit exit_f2fs_fs(void)
{
        remove_proc_entry("fs/f2fs", NULL);
        f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        destroy_checkpoint_caches();
        destroy_gc_caches();
        destroy_segment_manager_caches();
        destroy_node_manager_caches();
        destroy_inodecache();
        kset_unregister(f2fs_kset);
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");