/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_err, NULL},
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
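/*
 * The clamp below works out to 0.2% of the user-visible block count:
 * (count << 1) / 1000 == count * 2 / 1000. For example, a 100GB volume
 * with 4KB blocks (~26.2M blocks) caps reserve_root at ~52K blocks,
 * i.e. about 200MB kept back for root-only allocations.
 */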
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = (sbi->user_block_count << 1) / 1000;

	/* limit is 0.2% */
	if (test_opt(sbi, RESERVE_ROOT) && sbi->root_reserved_blocks > limit) {
		sbi->root_reserved_blocks = limit;
		f2fs_msg(sbi->sb, KERN_INFO,
			"Reduce reserved blocks for root = %u",
			sbi->root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(sbi->s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(sbi->s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_msg(sbi->sb, KERN_INFO,
			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
				from_kuid_munged(&init_user_ns, sbi->s_resuid),
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sb)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -EINVAL;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}
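/*
 * Typical usage from the command line, e.g.:
 *   mount -t f2fs -o usrjquota=aquota.user,jqfmt=vfsv0 /dev/sdb1 /mnt
 * The name is taken relative to the filesystem root; f2fs_set_qf_name()
 * rejects any name containing '/'.
 */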
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			"Cannot enable project quota enforcement.");
		return -1;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
			sbi->s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!sbi->s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		sbi->s_jquota_fmt = 0;
	}
	if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Filesystem with quota feature cannot be mounted RDWR "
			"without CONFIG_QUOTA");
		return -1;
	}
	return 0;
}
#endif
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			sbi->inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_msg(sb, KERN_INFO,
					"Preserve previous reserve_root=%u",
					sbi->root_reserved_blocks);
			} else {
				sbi->root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid uid value %d", arg);
				return -EINVAL;
			}
			sbi->s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid gid value %d", arg);
				return -EINVAL;
			}
			sbi->s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			sbi->write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			sbi->s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			sbi->s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			sbi->s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
					"inline_xattr_size option should be "
					"set with inline_xattr option");
			return -EINVAL;
		}
		if (!sbi->inline_xattr_size ||
			sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE -
					F2FS_TOTAL_EXTRA_ATTR_SIZE -
					DEF_INLINE_RESERVED_SIZE -
					DEF_MIN_INLINE_SIZE) {
			f2fs_msg(sb, KERN_ERR,
					"inline xattr size is out of range");
			return -EINVAL;
		}
	}
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);
#ifdef CONFIG_QUOTA
	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
	fi->i_reserved_quota = 0;
#endif
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
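/*
 * Returning 0 above keeps the inode cached even though it is unhashed:
 * the truncation work is done right here, with i_count elevated and
 * i_lock dropped, so evict() is never re-entered from the writeback
 * path that still holds I_SYNC on this inode.
 */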
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;
	if (buf->f_bfree > sbi->root_reserved_blocks)
		buf->f_bavail = buf->f_bfree - sbi->root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - sbi->nquota_files -
						F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);

	if (sbi->s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
#endif
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					sbi->inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				sbi->root_reserved_blocks,
				from_kuid_munged(&init_user_ns, sbi->s_resuid),
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				sbi->fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;
	sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[MAXQUOTAS];
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;
	active_logs = sbi->active_logs;

#ifdef CONFIG_QUOTA
	s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i]) {
			s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							GFP_KERNEL);
			if (!s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sb)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_mapping_page(mapping, blkidx, NULL);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
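/*
 * As on the read side, writes are split at block boundaries. On an error
 * such as -ENOSPC partway through the buffer, the short count
 * (len - towrite) reports how much actually reached the quota file.
 */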
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
					sbi->s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_msg(sbi->sb, KERN_ERR,
					"Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_msg(sbi->sb, KERN_ERR,
				"Cannot turn on quotas: %d on %d", err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(sb));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_msg(sb, KERN_ERR,
			"Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(F2FS_SB(sb), USRQUOTA),
		test_opt(F2FS_SB(sb), GRPQUOTA),
		test_opt(F2FS_SB(sb), PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_msg(sb, KERN_ERR,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}
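/*
 * With the quota_ino feature, the quota files live at fixed inode numbers
 * recorded in the superblock, so they are enabled above by inode rather
 * than by path name and behave as hidden system files
 * (DQUOT_QUOTA_SYS_FILE).
 */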
static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}
*sb
, int type
, int format_id
,
1676 const struct path
*path
)
1678 struct inode
*inode
;
1681 err
= f2fs_quota_sync(sb
, type
);
1685 err
= dquot_quota_on(sb
, type
, format_id
, path
);
1689 inode
= d_inode(path
->dentry
);
1692 F2FS_I(inode
)->i_flags
|= FS_NOATIME_FL
| FS_IMMUTABLE_FL
;
1693 inode_set_flags(inode
, S_NOATIME
| S_IMMUTABLE
,
1694 S_NOATIME
| S_IMMUTABLE
);
1695 inode_unlock(inode
);
1696 f2fs_mark_inode_dirty_sync(inode
, false);
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, type);

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(sb))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
	 * space in inode.i_addr, it will be more safe to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
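/*
 * For scale: with the usual 4KB block geometry (ADDRS_PER_BLOCK and
 * NIDS_PER_BLOCK both 1018 -- values assumed here, see f2fs.h), this is
 * 1018*2 + 1018^2*2 + 1018^3 ~= 1.06e9 blocks, i.e. a per-file size
 * limit of roughly 3.9TiB.
 */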
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
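/*
 * The checks below walk the on-disk layout in block-address order:
 *   superblock | CP | SIT | NAT | SSA | MAIN
 * Each region must end exactly where the next begins. MAIN may end
 * short of the last segment (the superblock is then fixed up), but it
 * must never extend past the end of the device.
 */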
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	int i;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i, j;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	atomic_set(&sbi->wb_sync_req, 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
								GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES   4096

	zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) *
				F2FS_REPORT_NR_ZONES, GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif
/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either is broken, we pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks*/
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) *
						max_devices, GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1 +
					le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_mounted_blkzoned(sbi->sb)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_msg(sbi->sb, KERN_INFO,
			"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
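
/*
 * Worked example (editorial): with log_blocks_per_seg == 9 (512 blocks
 * per segment) and two devices of 1024 segments each, device 0 spans
 * blocks [0 .. segment0_blkaddr + 524287] and device 1 starts at
 * end_blk(0) + 1, so every global block address falls into exactly one
 * device range.
 */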
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	sbi->s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	sbi->s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sb))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			"Zoned block device support is not enabled\n");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	if (f2fs_sb_has_quota_ino(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi->sb)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif
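
	/*
	 * Note (editorial): with the QUOTA_INO feature, quota files live in
	 * reserved inodes recorded in the super block, so they are counted
	 * above rather than being named through the usrjquota=/grpjquota=
	 * mount options.
	 */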
	sb->s_op = &f2fs_sops;
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = f2fs_kmalloc(sbi,
					n * sizeof(struct f2fs_bio_info),
					GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_options;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}
	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
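
	/*
	 * Note (editorial): every counter above is seeded from the on-disk
	 * checkpoint (sbi->ckpt), so the mount resumes from the last
	 * consistent checkpoint state instead of rescanning the device.
	 */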
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_stats;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

#ifdef CONFIG_QUOTA
	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err) {
			f2fs_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", err);
			goto free_sysfs;
		}
	}
#endif
	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_meta;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (!retry)
			goto skip_recovery;

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_meta;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_meta;
		}
	}
skip_recovery:
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);
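
	/*
	 * Note (editorial): recover_fsync_data(sbi, false) actually replays
	 * fsync'd-but-not-checkpointed data, while the 'true' form only
	 * checks whether such data exists; that is how the
	 * DISABLE_ROLL_FORWARD branch above detects an unsafe writable mount.
	 */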
	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_meta;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_meta:
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	f2fs_sync_inode_meta(sbi);
	/*
	 * Some dirty meta pages can be produced by recover_orphan_inodes()
	 * failing with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
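
	/*
	 * Note (editorial): the labels below unwind strictly in reverse
	 * order of construction; jumping to any one of them releases that
	 * resource and falls through to release everything set up earlier.
	 */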
#ifdef CONFIG_QUOTA
free_sysfs:
#endif
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_node_inode:
	release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
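
/*
 * Note (editorial): mount_bdev() opens the backing block device and
 * either reuses an existing super block for it or allocates a fresh one
 * and calls f2fs_fill_super() to populate it.
 */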
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
		stop_gc_thread(F2FS_SB(sb));
		stop_discard_thread(F2FS_SB(sb));
	}
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");