/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_noheap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_flush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_err, NULL},
};
/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
};

struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
};
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;

	return NULL;
}
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
	*ui = t;
	return count;
}
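/*
 * Sketch of the sysfs data path (file names come from the F2FS_RW_ATTR()
 * definitions further down): writing to, e.g., /sys/fs/f2fs/<dev>/ipu_policy
 * lands in f2fs_sbi_store(), which resolves the containing structure via
 * __struct_ptr() and updates the unsigned int at a->offset inside it.
 */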
static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))
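/*
 * For example, the first definition below,
 * F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time),
 * expands to a struct f2fs_attr named f2fs_attr_gc_min_sleep_time with mode
 * 0644 and offset == offsetof(struct f2fs_gc_kthread, min_sleep_time),
 * surfaced as the sysfs file "gc_min_sleep_time".
 */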
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, cp_interval);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(cp_interval),
	NULL,
};
static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	if (test_opt(F2FS_SB(sb), INLINE_XATTR))
		set_inode_flag(fi, FI_INLINE_XATTR);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

#ifdef CONFIG_F2FS_FS_ENCRYPTION
	fi->i_crypt_info = NULL;
#endif
	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				commit_inmem_pages(inode, true);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode, true);

			sb_end_intwrite(inode->i_sb);

#ifdef CONFIG_F2FS_FS_ENCRYPTION
			if (F2FS_I(inode)->i_crypt_info)
				f2fs_free_encryption_info(inode,
					F2FS_I(inode)->i_crypt_info);
#endif
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		return 0;
	}
	return generic_drop_inode(inode);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this here.
	 * In addition, an EIO will skip the checkpoint, so we need this as
	 * well.
	 */
	release_dirty_inode(sbi);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	brelse(sbi->raw_super_buf);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	} else {
		f2fs_balance_fs(sbi);
	}
	f2fs_trace_ios(NULL, 1);

	return 0;
}
static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}
static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
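/*
 * Note on the two free-space figures above: f_bfree additionally subtracts
 * the overprovision area (ovp_count), which is reserved for garbage
 * collection, while f_bavail is computed against user_block_count, i.e. the
 * space a user can actually allocate.
 */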
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}
static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}
static int segment_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, segment_info_seq_show, PDE_DATA(inode));
}

static const struct file_operations f2fs_seq_segment_info_fops = {
	.owner = THIS_MODULE,
	.open = segment_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
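/*
 * Example of the resulting /proc/fs/f2fs/<dev>/segment_info output (values
 * are illustrative): every tenth line is prefixed with the segment number,
 * followed by "type|valid_blocks" pairs:
 *
 *	0         0|512 0|510 2|0 ...
 */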
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, EXTENT_CACHE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	sync_filesystem(sb);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			f2fs_sync_fs(sb, 1);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	/*
	 * We stop issuing the flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}
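/*
 * Illustrative only: "mount -o remount,background_gc=off /mnt" passes
 * through this function and stops the GC thread; if anything fails later,
 * the saved org_mount_opt/active_logs are restored via restore_opts.
 */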
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_size(unsigned bits)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	result <<= bits;
	return result;
}
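/*
 * Worked example, assuming the usual 4KB-block constants
 * (DEF_ADDRS_PER_INODE == 923, F2FS_INLINE_XATTR_ADDRS == 50,
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): the block count is
 * 873 + 2*1018 + 2*1018^2 + 1018^3, which shifted left by bits == 12
 * yields a maximum file size of roughly 3.94 TB.
 */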
static inline bool sanity_check_area_boundary(struct super_block *sb,
					struct f2fs_super_block *raw_super)
{
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
			main_blkaddr,
			segment0_blkaddr + (segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	}

	return false;
}
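/*
 * The checks above enforce that the metadata areas are laid out back to
 * back, each region ending exactly where the next begins (sketch):
 *
 *	segment0/CP | SIT | NAT | SSA | MAIN
 */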
static int sanity_check_raw_super(struct super_block *sb,
			struct f2fs_super_block *raw_super)
{
	block_t segment_count, segs_per_sec, secs_per_zone;
	block_t total_sections, blocks_per_seg;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_CACHE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			segment_count);
		return 1;
	}

	if (total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment/section count (%u, %u x %u)",
			segment_count, total_sections, segs_per_sec);
		return 1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_msg(sb, KERN_INFO,
			"Small segment_count (%u < %u * %u)",
			segment_count, segs_per_sec, total_sections);
		return 1;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong segment_count / block_count (%u > %llu)",
			segment_count, le64_to_cpu(raw_super->block_count));
		return 1;
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong secs_per_zone / total_sections (%u, %u)",
			secs_per_zone, total_sections);
		return 1;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION) {
		f2fs_msg(sb, KERN_INFO,
			"Corrupted extension count (%u > %u)",
			le32_to_cpu(raw_super->extension_count),
			F2FS_MAX_EXTENSION);
		return 1;
	}

	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_msg(sb, KERN_INFO,
			"Insane cp_payload (%u > %u)",
			le32_to_cpu(raw_super->cp_payload),
			blocks_per_seg - F2FS_CP_PACKS);
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sb, raw_super))
		return 1;

	return 0;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong user_block_count: %u", user_block_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Node segment (%u, %u) has the same "
					"segno: %u", i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Data segment (%u, %u) has the same "
					"segno: %u", i, j,
					le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Node segment (%u) and Data segment (%u)"
					" has the same segno: %u", i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong bitmap size: sit: %u, nat:%u",
			sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong cp_pack_start_sum: %u",
			cp_pack_start_sum);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->cp_interval = DEF_CP_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
}
/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read the first one first;
 * if it is invalid, move on to read the second one.
 */
static int read_raw_super_block(struct super_block *sb,
			struct f2fs_super_block **raw_super,
			struct buffer_head **raw_super_buf,
			int *recovery)
{
	int block = 0;
	struct buffer_head *buffer;
	struct f2fs_super_block *super;
	int err = 0;

retry:
	buffer = sb_bread(sb, block);
	if (!buffer) {
		*recovery = 1;
		f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
		if (block == 0) {
			block++;
			goto retry;
		}
		err = -EIO;
		goto out;
	}

	super = (struct f2fs_super_block *)
		((char *)(buffer)->b_data + F2FS_SUPER_OFFSET);

	/* sanity checking of raw super */
	if (sanity_check_raw_super(sb, super)) {
		brelse(buffer);
		*recovery = 1;
		f2fs_msg(sb, KERN_ERR,
			"Can't find valid F2FS filesystem in %dth superblock",
			block + 1);
		if (block == 0) {
			block++;
			goto retry;
		}
		err = -EINVAL;
		goto out;
	}

	if (!*raw_super) {
		*raw_super_buf = buffer;
		*raw_super = super;
	} else {
		/* already have a valid superblock */
		brelse(buffer);
	}

	/* check the validity of the second superblock */
	if (block == 0) {
		block++;
		goto retry;
	}

out:
	/* No valid superblock */
	if (!*raw_super)
		return err;

	return 0;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *sbh = sbi->raw_super_buf;
	sector_t block = sbh->b_blocknr;
	int err;

	/* write back-up superblock first */
	sbh->b_blocknr = block ? 0 : 1;
	mark_buffer_dirty(sbh);
	err = sync_dirty_buffer(sbh);

	sbh->b_blocknr = block;

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		goto out;

	/* write current valid superblock */
	mark_buffer_dirty(sbh);
	err = sync_dirty_buffer(sbh);
out:
	clear_buffer_write_io_error(sbh);
	set_buffer_uptodate(sbh);
	return err;
}
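/*
 * f2fs keeps two superblock copies, in block 0 and block 1 (each at
 * F2FS_SUPER_OFFSET within its block). Temporarily flipping sbh->b_blocknr
 * above writes the buffer to the *other* copy first, so a crash in the
 * middle of an update always leaves one valid copy on disk.
 */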
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * the mount should fail when the device is read-only and
		 * the previous checkpoint was not done by a clean system
		 * shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_kobj;
		}
	}

	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi, true);
	}

	sbi->cp_expires = round_jiffies_up(jiffies);

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	sb->s_fs_info = NULL;

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
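/*
 * MODULE_ALIAS_FS("f2fs") lets the VFS autoload this module on
 * "mount -t f2fs ..."; kill_f2fs_super() flags the sbi as closing before
 * the generic teardown so in-flight writers can notice the unmount.
 */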
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info));
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
	err = f2fs_init_crypto();
	if (err)
		goto free_kset;

	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_crypto;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_crypto:
	f2fs_exit_crypto();
free_kset:
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_shrinker(&f2fs_shrinker_info);
	unregister_filesystem(&f2fs_fs_type);
	f2fs_exit_crypto();
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	kset_unregister(f2fs_kset);
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");