/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_noheap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_flush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_err, NULL},
};

/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
};

struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
};

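/* Map a sysfs attribute's struct_type to the structure that holds its field */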
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
	return NULL;
}

static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}

static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
	*ui = t;
	return count;
}

static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}

static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}

static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}

#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, cp_interval);

#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(cp_interval),
	NULL,
};

static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

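/*
 * Parse the comma-separated mount option string and apply each recognized
 * option to the f2fs_sb_info; an unrecognized option fails the mount.
 */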
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	if (test_opt(F2FS_SB(sb), INLINE_XATTR))
		set_inode_flag(fi, FI_INLINE_XATTR);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

#ifdef CONFIG_F2FS_FS_ENCRYPTION
	fi->i_crypt_info = NULL;
#endif
	return &fi->vfs_inode;
}

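/*
 * ->drop_inode(): if an open-unlinked inode is dropped while it is still
 * under writeback, do the final truncation here (holding an extra reference)
 * instead of from evict_inode(), to avoid the deadlock described below.
 */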
static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				commit_inmem_pages(inode, true);

			/* fi->extent_tree should be kept for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode, true);

			sb_end_intwrite(inode->i_sb);

#ifdef CONFIG_F2FS_FS_ENCRYPTION
			if (F2FS_I(inode)->i_crypt_info)
				f2fs_free_encryption_info(inode,
					F2FS_I(inode)->i_crypt_info);
#endif
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		return 0;
	}
	return generic_drop_inode(inode);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so these need to be released here.
	 * In addition, an EIO will have skipped the checkpoint, so this is
	 * needed in that case as well.
	 */
	release_dirty_inode(sbi);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	brelse(sbi->raw_super_buf);
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	} else {
		f2fs_balance_fs(sbi);
	}
	f2fs_trace_ios(NULL, 1);

	return 0;
}

static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}

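/*
 * /proc/fs/f2fs/<dev>/segment_info: print each segment's type and its
 * number of valid blocks, ten segments per output line.
 */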
static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}

static int segment_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, segment_info_seq_show, PDE_DATA(inode));
}

static const struct file_operations f2fs_seq_segment_info_fops = {
	.owner = THIS_MODULE,
	.open = segment_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, EXTENT_CACHE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	sync_filesystem(sb);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			f2fs_sync_fs(sb, 1);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	/*
	 * We stop the issue_flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}

static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

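/*
 * Compute the largest file size addressable through an inode: the in-inode
 * block pointers, two direct node blocks, two indirect node blocks and one
 * double-indirect node block, all scaled by the block size (1 << bits).
 */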
static loff_t max_file_size(unsigned bits)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	result <<= bits;
	return result;
}

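/*
 * Verify that the CP/SIT/NAT/SSA/MAIN areas recorded in the superblock are
 * laid out back to back and together cover the whole segment range; return
 * true if any boundary is inconsistent.
 */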
static inline bool sanity_check_area_boundary(struct super_block *sb,
					struct f2fs_super_block *raw_super)
{
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
			main_blkaddr,
			segment0_blkaddr + (segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	}

	return false;
}

static int sanity_check_raw_super(struct super_block *sb,
			struct f2fs_super_block *raw_super)
{
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_CACHE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sb, raw_super))
		return 1;

	return 0;
}

static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->cp_interval = DEF_CP_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
}

/*
 * Read the f2fs raw superblock.
 * Because we keep two copies of the superblock, read the first one first;
 * if it is invalid, fall back to reading the second one.
 */
static int read_raw_super_block(struct super_block *sb,
			struct f2fs_super_block **raw_super,
			struct buffer_head **raw_super_buf,
			int *recovery)
{
	int block = 0;
	struct buffer_head *buffer;
	struct f2fs_super_block *super;
	int err = 0;

retry:
	buffer = sb_bread(sb, block);
	if (!buffer) {
		*recovery = 1;
		f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
		if (block == 0) {
			block++;
			goto retry;
		} else {
			err = -EIO;
			goto out;
		}
	}

	super = (struct f2fs_super_block *)
		((char *)(buffer)->b_data + F2FS_SUPER_OFFSET);

	/* sanity checking of raw super */
	if (sanity_check_raw_super(sb, super)) {
		brelse(buffer);
		*recovery = 1;
		f2fs_msg(sb, KERN_ERR,
			"Can't find valid F2FS filesystem in %dth superblock",
			block + 1);
		if (block == 0) {
			block++;
			goto retry;
		} else {
			err = -EINVAL;
			goto out;
		}
	}

	if (!*raw_super) {
		*raw_super_buf = buffer;
		*raw_super = super;
	} else {
		/* already have a valid superblock */
		brelse(buffer);
	}

	/* check the validity of the second superblock */
	if (block == 0) {
		block++;
		goto retry;
	}

out:
	/* No valid superblock */
	if (!*raw_super)
		return err;

	return 0;
}

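/*
 * Write the in-memory superblock back to disk: first to the other copy's
 * location, then (unless we are recovering a broken copy, or the first write
 * failed) to the current valid location as well.
 */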
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *sbh = sbi->raw_super_buf;
	sector_t block = sbh->b_blocknr;
	int err;

	/* write back-up superblock first */
	sbh->b_blocknr = block ? 0 : 1;
	mark_buffer_dirty(sbh);
	err = sync_dirty_buffer(sbh);

	sbh->b_blocknr = block;

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		goto out;

	/* write current valid superblock */
	mark_buffer_dirty(sbh);
	err = sync_dirty_buffer(sbh);
out:
	clear_buffer_write_io_error(sbh);
	set_buffer_uptodate(sbh);
	return err;
}

static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);
	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				&f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;
	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * then start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi, true);
	}

	sbi->cp_expires = round_jiffies_up(jiffies);

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info));
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
	err = f2fs_init_crypto();
	if (err)
		goto free_kset;

	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_crypto;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_crypto:
	f2fs_exit_crypto();
free_kset:
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_shrinker(&f2fs_shrinker_info);
	unregister_filesystem(&f2fs_fs_type);
	f2fs_exit_crypto();
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	kset_unregister(f2fs_kset);
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");