/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_err, NULL},
};
/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_kthread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	FAULT_INFO_RATE,	/* struct f2fs_fault_info */
	FAULT_INFO_TYPE,	/* struct f2fs_fault_info */
#endif
};
struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
};
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	else if (struct_type == FAULT_INFO_RATE ||
					struct_type == FAULT_INFO_TYPE)
		return (unsigned char *)&sbi->fault_info;
#endif
	return NULL;
}
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
			BD_PART_WRITTEN(sbi)));
}
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
		return -EINVAL;
#endif
	*ui = t;
	return count;
}
static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
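/*
 * Illustrative expansion (not in the original source): the first entry
 * below, F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time,
 * min_sleep_time), defines a 0644 sysfs attribute whose show/store
 * callbacks read and write the unsigned int located at
 * offsetof(struct f2fs_gc_kthread, min_sleep_time) past the pointer
 * that __struct_ptr() returns for GC_THREAD.
 */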
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
#ifdef CONFIG_F2FS_FAULT_INJECTION
	ATTR_LIST(inject_rate),
	ATTR_LIST(inject_type),
#endif
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};
static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;
	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		return 0;
	}

	return generic_drop_inode(inode);
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, an EIO error will skip the checkpoint, so we need
	 * this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);

	destroy_percpu_info(sbi);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
							buf->f_bavail);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}
static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}

static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i, j;

	seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u|", se->type,
					get_valid_blocks(sbi, i, 1));
		for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
			seq_printf(seq, " %.2x", se->cur_valid_map[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}
#define F2FS_PROC_FILE_DEF(_name)					\
static int _name##_open_fs(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, _name##_seq_show, PDE_DATA(inode));	\
}									\
									\
static const struct file_operations f2fs_seq_##_name##_fops = {		\
	.open = _name##_open_fs,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
	.release = single_release,					\
};

F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switching the extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue-flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_key_prefix(struct inode *inode, u8 **key)
{
	*key = F2FS_I_SB(inode)->key_prefix;
	return F2FS_I_SB(inode)->key_prefix_size;
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static struct fscrypt_operations f2fs_cryptops = {
	.get_context	= f2fs_get_context,
	.key_prefix	= f2fs_key_prefix,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	mutex_init(&sbi->wio_mutex[NODE]);
	mutex_init(&sbi->wio_mutex[DATA]);
	spin_lock_init(&sbi->cp_lock);

#ifdef CONFIG_F2FS_FS_ENCRYPTION
	memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
				F2FS_KEY_DESC_PREFIX_SIZE);
	sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
#endif
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_size(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES   4096

	zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif
/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either of them is broken, we pass
 * the recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	int i;

	for (i = 0; i < MAX_DEVICES; i++) {
		if (!RDEV(i).path[0])
			return 0;

		if (i == 0) {
			sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
						MAX_DEVICES, GFP_KERNEL);
			if (!sbi->devs)
				return -ENOMEM;
		}

		memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
		FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
		if (i == 0) {
			FDEV(i).start_blk = 0;
			FDEV(i).end_blk = FDEV(i).start_blk +
				(FDEV(i).total_segments <<
				sbi->log_blocks_per_seg) - 1 +
				le32_to_cpu(raw_super->segment0_blkaddr);
		} else {
			FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
			FDEV(i).end_blk = FDEV(i).start_blk +
				(FDEV(i).total_segments <<
				sbi->log_blocks_per_seg) - 1;
		}

		FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_mounted_blkzoned(sbi->sb)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	return 0;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			 "Zoned block device support is not enabled");
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_options;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc) {
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);
		proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_bits_fops, sb);
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and
		 * the previous checkpoint was not done by a clean system
		 * shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (!retry)
			goto skip_recovery;

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_kobj;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_kobj;
		}
	}
skip_recovery:
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted as read-only then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	f2fs_sync_inode_meta(sbi);
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	mutex_lock(&sbi->umount_mutex);
	release_ino_entry(sbi, true);
	f2fs_leave_shrinker(sbi);
	/*
	 * Some dirty meta pages can be produced by recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	destroy_percpu_info(sbi);
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
*f2fs_mount(struct file_system_type
*fs_type
, int flags
,
2126 const char *dev_name
, void *data
)
2128 return mount_bdev(fs_type
, flags
, dev_name
, data
, f2fs_fill_super
);
2131 static void kill_f2fs_super(struct super_block
*sb
)
2134 set_sbi_flag(F2FS_SB(sb
), SBI_IS_CLOSE
);
2135 kill_block_super(sb
);
2138 static struct file_system_type f2fs_fs_type
= {
2139 .owner
= THIS_MODULE
,
2141 .mount
= f2fs_mount
,
2142 .kill_sb
= kill_f2fs_super
,
2143 .fs_flags
= FS_REQUIRES_DEV
,
2145 MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");