1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
8 #include <linux/module.h>
9 #include <linux/init.h>
11 #include <linux/statfs.h>
12 #include <linux/buffer_head.h>
13 #include <linux/backing-dev.h>
14 #include <linux/kthread.h>
15 #include <linux/parser.h>
16 #include <linux/mount.h>
17 #include <linux/seq_file.h>
18 #include <linux/proc_fs.h>
19 #include <linux/random.h>
20 #include <linux/exportfs.h>
21 #include <linux/blkdev.h>
22 #include <linux/quotaops.h>
23 #include <linux/f2fs_fs.h>
24 #include <linux/sysfs.h>
25 #include <linux/quota.h>
26 #include <linux/unicode.h>
27 #include <linux/part_stat.h>
36 #define CREATE_TRACE_POINTS
37 #include <trace/events/f2fs.h>
39 static struct kmem_cache
*f2fs_inode_cachep
;
41 #ifdef CONFIG_F2FS_FAULT_INJECTION
43 const char *f2fs_fault_name
[FAULT_MAX
] = {
44 [FAULT_KMALLOC
] = "kmalloc",
45 [FAULT_KVMALLOC
] = "kvmalloc",
46 [FAULT_PAGE_ALLOC
] = "page alloc",
47 [FAULT_PAGE_GET
] = "page get",
48 [FAULT_ALLOC_BIO
] = "alloc bio",
49 [FAULT_ALLOC_NID
] = "alloc nid",
50 [FAULT_ORPHAN
] = "orphan",
51 [FAULT_BLOCK
] = "no more block",
52 [FAULT_DIR_DEPTH
] = "too big dir depth",
53 [FAULT_EVICT_INODE
] = "evict_inode fail",
54 [FAULT_TRUNCATE
] = "truncate fail",
55 [FAULT_READ_IO
] = "read IO error",
56 [FAULT_CHECKPOINT
] = "checkpoint error",
57 [FAULT_DISCARD
] = "discard error",
58 [FAULT_WRITE_IO
] = "write IO error",
61 void f2fs_build_fault_attr(struct f2fs_sb_info
*sbi
, unsigned int rate
,
64 struct f2fs_fault_info
*ffi
= &F2FS_OPTION(sbi
).fault_info
;
67 atomic_set(&ffi
->inject_ops
, 0);
68 ffi
->inject_rate
= rate
;
72 ffi
->inject_type
= type
;
75 memset(ffi
, 0, sizeof(struct f2fs_fault_info
));
79 /* f2fs-wide shrinker description */
80 static struct shrinker f2fs_shrinker_info
= {
81 .scan_objects
= f2fs_shrink_scan
,
82 .count_objects
= f2fs_shrink_count
,
83 .seeks
= DEFAULT_SEEKS
,
88 Opt_disable_roll_forward
,
99 Opt_disable_ext_identify
,
102 Opt_inline_xattr_size
,
140 Opt_test_dummy_encryption
,
142 Opt_checkpoint_disable
,
143 Opt_checkpoint_disable_cap
,
144 Opt_checkpoint_disable_cap_perc
,
145 Opt_checkpoint_enable
,
146 Opt_compress_algorithm
,
147 Opt_compress_log_size
,
148 Opt_compress_extension
,
153 static match_table_t f2fs_tokens
= {
154 {Opt_gc_background
, "background_gc=%s"},
155 {Opt_disable_roll_forward
, "disable_roll_forward"},
156 {Opt_norecovery
, "norecovery"},
157 {Opt_discard
, "discard"},
158 {Opt_nodiscard
, "nodiscard"},
159 {Opt_noheap
, "no_heap"},
161 {Opt_user_xattr
, "user_xattr"},
162 {Opt_nouser_xattr
, "nouser_xattr"},
164 {Opt_noacl
, "noacl"},
165 {Opt_active_logs
, "active_logs=%u"},
166 {Opt_disable_ext_identify
, "disable_ext_identify"},
167 {Opt_inline_xattr
, "inline_xattr"},
168 {Opt_noinline_xattr
, "noinline_xattr"},
169 {Opt_inline_xattr_size
, "inline_xattr_size=%u"},
170 {Opt_inline_data
, "inline_data"},
171 {Opt_inline_dentry
, "inline_dentry"},
172 {Opt_noinline_dentry
, "noinline_dentry"},
173 {Opt_flush_merge
, "flush_merge"},
174 {Opt_noflush_merge
, "noflush_merge"},
175 {Opt_nobarrier
, "nobarrier"},
176 {Opt_fastboot
, "fastboot"},
177 {Opt_extent_cache
, "extent_cache"},
178 {Opt_noextent_cache
, "noextent_cache"},
179 {Opt_noinline_data
, "noinline_data"},
180 {Opt_data_flush
, "data_flush"},
181 {Opt_reserve_root
, "reserve_root=%u"},
182 {Opt_resgid
, "resgid=%u"},
183 {Opt_resuid
, "resuid=%u"},
184 {Opt_mode
, "mode=%s"},
185 {Opt_io_size_bits
, "io_bits=%u"},
186 {Opt_fault_injection
, "fault_injection=%u"},
187 {Opt_fault_type
, "fault_type=%u"},
188 {Opt_lazytime
, "lazytime"},
189 {Opt_nolazytime
, "nolazytime"},
190 {Opt_quota
, "quota"},
191 {Opt_noquota
, "noquota"},
192 {Opt_usrquota
, "usrquota"},
193 {Opt_grpquota
, "grpquota"},
194 {Opt_prjquota
, "prjquota"},
195 {Opt_usrjquota
, "usrjquota=%s"},
196 {Opt_grpjquota
, "grpjquota=%s"},
197 {Opt_prjjquota
, "prjjquota=%s"},
198 {Opt_offusrjquota
, "usrjquota="},
199 {Opt_offgrpjquota
, "grpjquota="},
200 {Opt_offprjjquota
, "prjjquota="},
201 {Opt_jqfmt_vfsold
, "jqfmt=vfsold"},
202 {Opt_jqfmt_vfsv0
, "jqfmt=vfsv0"},
203 {Opt_jqfmt_vfsv1
, "jqfmt=vfsv1"},
204 {Opt_whint
, "whint_mode=%s"},
205 {Opt_alloc
, "alloc_mode=%s"},
206 {Opt_fsync
, "fsync_mode=%s"},
207 {Opt_test_dummy_encryption
, "test_dummy_encryption=%s"},
208 {Opt_test_dummy_encryption
, "test_dummy_encryption"},
209 {Opt_inlinecrypt
, "inlinecrypt"},
210 {Opt_checkpoint_disable
, "checkpoint=disable"},
211 {Opt_checkpoint_disable_cap
, "checkpoint=disable:%u"},
212 {Opt_checkpoint_disable_cap_perc
, "checkpoint=disable:%u%%"},
213 {Opt_checkpoint_enable
, "checkpoint=enable"},
214 {Opt_compress_algorithm
, "compress_algorithm=%s"},
215 {Opt_compress_log_size
, "compress_log_size=%u"},
216 {Opt_compress_extension
, "compress_extension=%s"},
221 void f2fs_printk(struct f2fs_sb_info
*sbi
, const char *fmt
, ...)
223 struct va_format vaf
;
229 level
= printk_get_level(fmt
);
230 vaf
.fmt
= printk_skip_level(fmt
);
232 printk("%c%cF2FS-fs (%s): %pV\n",
233 KERN_SOH_ASCII
, level
, sbi
->sb
->s_id
, &vaf
);
238 #ifdef CONFIG_UNICODE
239 static const struct f2fs_sb_encodings
{
243 } f2fs_sb_encoding_map
[] = {
244 {F2FS_ENC_UTF8_12_1
, "utf8", "12.1.0"},
247 static int f2fs_sb_read_encoding(const struct f2fs_super_block
*sb
,
248 const struct f2fs_sb_encodings
**encoding
,
251 __u16 magic
= le16_to_cpu(sb
->s_encoding
);
254 for (i
= 0; i
< ARRAY_SIZE(f2fs_sb_encoding_map
); i
++)
255 if (magic
== f2fs_sb_encoding_map
[i
].magic
)
258 if (i
>= ARRAY_SIZE(f2fs_sb_encoding_map
))
261 *encoding
= &f2fs_sb_encoding_map
[i
];
262 *flags
= le16_to_cpu(sb
->s_encoding_flags
);
268 static inline void limit_reserve_root(struct f2fs_sb_info
*sbi
)
270 block_t limit
= min((sbi
->user_block_count
<< 1) / 1000,
271 sbi
->user_block_count
- sbi
->reserved_blocks
);
274 if (test_opt(sbi
, RESERVE_ROOT
) &&
275 F2FS_OPTION(sbi
).root_reserved_blocks
> limit
) {
276 F2FS_OPTION(sbi
).root_reserved_blocks
= limit
;
277 f2fs_info(sbi
, "Reduce reserved blocks for root = %u",
278 F2FS_OPTION(sbi
).root_reserved_blocks
);
280 if (!test_opt(sbi
, RESERVE_ROOT
) &&
281 (!uid_eq(F2FS_OPTION(sbi
).s_resuid
,
282 make_kuid(&init_user_ns
, F2FS_DEF_RESUID
)) ||
283 !gid_eq(F2FS_OPTION(sbi
).s_resgid
,
284 make_kgid(&init_user_ns
, F2FS_DEF_RESGID
))))
285 f2fs_info(sbi
, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
286 from_kuid_munged(&init_user_ns
,
287 F2FS_OPTION(sbi
).s_resuid
),
288 from_kgid_munged(&init_user_ns
,
289 F2FS_OPTION(sbi
).s_resgid
));
292 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info
*sbi
)
294 if (!F2FS_OPTION(sbi
).unusable_cap_perc
)
297 if (F2FS_OPTION(sbi
).unusable_cap_perc
== 100)
298 F2FS_OPTION(sbi
).unusable_cap
= sbi
->user_block_count
;
300 F2FS_OPTION(sbi
).unusable_cap
= (sbi
->user_block_count
/ 100) *
301 F2FS_OPTION(sbi
).unusable_cap_perc
;
303 f2fs_info(sbi
, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
304 F2FS_OPTION(sbi
).unusable_cap
,
305 F2FS_OPTION(sbi
).unusable_cap_perc
);
308 static void init_once(void *foo
)
310 struct f2fs_inode_info
*fi
= (struct f2fs_inode_info
*) foo
;
312 inode_init_once(&fi
->vfs_inode
);
316 static const char * const quotatypes
[] = INITQFNAMES
;
317 #define QTYPE2NAME(t) (quotatypes[t])
318 static int f2fs_set_qf_name(struct super_block
*sb
, int qtype
,
321 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
325 if (sb_any_quota_loaded(sb
) && !F2FS_OPTION(sbi
).s_qf_names
[qtype
]) {
326 f2fs_err(sbi
, "Cannot change journaled quota options when quota turned on");
329 if (f2fs_sb_has_quota_ino(sbi
)) {
330 f2fs_info(sbi
, "QUOTA feature is enabled, so ignore qf_name");
334 qname
= match_strdup(args
);
336 f2fs_err(sbi
, "Not enough memory for storing quotafile name");
339 if (F2FS_OPTION(sbi
).s_qf_names
[qtype
]) {
340 if (strcmp(F2FS_OPTION(sbi
).s_qf_names
[qtype
], qname
) == 0)
343 f2fs_err(sbi
, "%s quota file already specified",
347 if (strchr(qname
, '/')) {
348 f2fs_err(sbi
, "quotafile must be on filesystem root");
351 F2FS_OPTION(sbi
).s_qf_names
[qtype
] = qname
;
359 static int f2fs_clear_qf_name(struct super_block
*sb
, int qtype
)
361 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
363 if (sb_any_quota_loaded(sb
) && F2FS_OPTION(sbi
).s_qf_names
[qtype
]) {
364 f2fs_err(sbi
, "Cannot change journaled quota options when quota turned on");
367 kfree(F2FS_OPTION(sbi
).s_qf_names
[qtype
]);
368 F2FS_OPTION(sbi
).s_qf_names
[qtype
] = NULL
;
372 static int f2fs_check_quota_options(struct f2fs_sb_info
*sbi
)
375 * We do the test below only for project quotas. 'usrquota' and
376 * 'grpquota' mount options are allowed even without quota feature
377 * to support legacy quotas in quota files.
379 if (test_opt(sbi
, PRJQUOTA
) && !f2fs_sb_has_project_quota(sbi
)) {
380 f2fs_err(sbi
, "Project quota feature not enabled. Cannot enable project quota enforcement.");
383 if (F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
] ||
384 F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
] ||
385 F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
]) {
386 if (test_opt(sbi
, USRQUOTA
) &&
387 F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
])
388 clear_opt(sbi
, USRQUOTA
);
390 if (test_opt(sbi
, GRPQUOTA
) &&
391 F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
])
392 clear_opt(sbi
, GRPQUOTA
);
394 if (test_opt(sbi
, PRJQUOTA
) &&
395 F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
])
396 clear_opt(sbi
, PRJQUOTA
);
398 if (test_opt(sbi
, GRPQUOTA
) || test_opt(sbi
, USRQUOTA
) ||
399 test_opt(sbi
, PRJQUOTA
)) {
400 f2fs_err(sbi
, "old and new quota format mixing");
404 if (!F2FS_OPTION(sbi
).s_jquota_fmt
) {
405 f2fs_err(sbi
, "journaled quota format not specified");
410 if (f2fs_sb_has_quota_ino(sbi
) && F2FS_OPTION(sbi
).s_jquota_fmt
) {
411 f2fs_info(sbi
, "QUOTA feature is enabled, so ignore jquota_fmt");
412 F2FS_OPTION(sbi
).s_jquota_fmt
= 0;
418 static int f2fs_set_test_dummy_encryption(struct super_block
*sb
,
420 const substring_t
*arg
,
423 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
424 #ifdef CONFIG_FS_ENCRYPTION
427 if (!f2fs_sb_has_encrypt(sbi
)) {
428 f2fs_err(sbi
, "Encrypt feature is off");
433 * This mount option is just for testing, and it's not worthwhile to
434 * implement the extra complexity (e.g. RCU protection) that would be
435 * needed to allow it to be set or changed during remount. We do allow
436 * it to be specified during remount, but only if there is no change.
438 if (is_remount
&& !F2FS_OPTION(sbi
).dummy_enc_policy
.policy
) {
439 f2fs_warn(sbi
, "Can't set test_dummy_encryption on remount");
442 err
= fscrypt_set_test_dummy_encryption(
443 sb
, arg
->from
, &F2FS_OPTION(sbi
).dummy_enc_policy
);
447 "Can't change test_dummy_encryption on remount");
448 else if (err
== -EINVAL
)
449 f2fs_warn(sbi
, "Value of option \"%s\" is unrecognized",
452 f2fs_warn(sbi
, "Error processing option \"%s\" [%d]",
456 f2fs_warn(sbi
, "Test dummy encryption mode enabled");
458 f2fs_warn(sbi
, "Test dummy encryption mount option ignored");
463 static int parse_options(struct super_block
*sb
, char *options
, bool is_remount
)
465 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
466 substring_t args
[MAX_OPT_ARGS
];
467 #ifdef CONFIG_F2FS_FS_COMPRESSION
468 unsigned char (*ext
)[F2FS_EXTENSION_LEN
];
480 while ((p
= strsep(&options
, ",")) != NULL
) {
485 * Initialize args struct so we know whether arg was
486 * found; some options take optional arguments.
488 args
[0].to
= args
[0].from
= NULL
;
489 token
= match_token(p
, f2fs_tokens
, args
);
492 case Opt_gc_background
:
493 name
= match_strdup(&args
[0]);
497 if (!strcmp(name
, "on")) {
498 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_ON
;
499 } else if (!strcmp(name
, "off")) {
500 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_OFF
;
501 } else if (!strcmp(name
, "sync")) {
502 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_SYNC
;
509 case Opt_disable_roll_forward
:
510 set_opt(sbi
, DISABLE_ROLL_FORWARD
);
513 /* this option mounts f2fs with ro */
514 set_opt(sbi
, NORECOVERY
);
515 if (!f2fs_readonly(sb
))
519 set_opt(sbi
, DISCARD
);
522 if (f2fs_sb_has_blkzoned(sbi
)) {
523 f2fs_warn(sbi
, "discard is required for zoned block devices");
526 clear_opt(sbi
, DISCARD
);
529 set_opt(sbi
, NOHEAP
);
532 clear_opt(sbi
, NOHEAP
);
534 #ifdef CONFIG_F2FS_FS_XATTR
536 set_opt(sbi
, XATTR_USER
);
538 case Opt_nouser_xattr
:
539 clear_opt(sbi
, XATTR_USER
);
541 case Opt_inline_xattr
:
542 set_opt(sbi
, INLINE_XATTR
);
544 case Opt_noinline_xattr
:
545 clear_opt(sbi
, INLINE_XATTR
);
547 case Opt_inline_xattr_size
:
548 if (args
->from
&& match_int(args
, &arg
))
550 set_opt(sbi
, INLINE_XATTR_SIZE
);
551 F2FS_OPTION(sbi
).inline_xattr_size
= arg
;
555 f2fs_info(sbi
, "user_xattr options not supported");
557 case Opt_nouser_xattr
:
558 f2fs_info(sbi
, "nouser_xattr options not supported");
560 case Opt_inline_xattr
:
561 f2fs_info(sbi
, "inline_xattr options not supported");
563 case Opt_noinline_xattr
:
564 f2fs_info(sbi
, "noinline_xattr options not supported");
567 #ifdef CONFIG_F2FS_FS_POSIX_ACL
569 set_opt(sbi
, POSIX_ACL
);
572 clear_opt(sbi
, POSIX_ACL
);
576 f2fs_info(sbi
, "acl options not supported");
579 f2fs_info(sbi
, "noacl options not supported");
582 case Opt_active_logs
:
583 if (args
->from
&& match_int(args
, &arg
))
585 if (arg
!= 2 && arg
!= 4 &&
586 arg
!= NR_CURSEG_PERSIST_TYPE
)
588 F2FS_OPTION(sbi
).active_logs
= arg
;
590 case Opt_disable_ext_identify
:
591 set_opt(sbi
, DISABLE_EXT_IDENTIFY
);
593 case Opt_inline_data
:
594 set_opt(sbi
, INLINE_DATA
);
596 case Opt_inline_dentry
:
597 set_opt(sbi
, INLINE_DENTRY
);
599 case Opt_noinline_dentry
:
600 clear_opt(sbi
, INLINE_DENTRY
);
602 case Opt_flush_merge
:
603 set_opt(sbi
, FLUSH_MERGE
);
605 case Opt_noflush_merge
:
606 clear_opt(sbi
, FLUSH_MERGE
);
609 set_opt(sbi
, NOBARRIER
);
612 set_opt(sbi
, FASTBOOT
);
614 case Opt_extent_cache
:
615 set_opt(sbi
, EXTENT_CACHE
);
617 case Opt_noextent_cache
:
618 clear_opt(sbi
, EXTENT_CACHE
);
620 case Opt_noinline_data
:
621 clear_opt(sbi
, INLINE_DATA
);
624 set_opt(sbi
, DATA_FLUSH
);
626 case Opt_reserve_root
:
627 if (args
->from
&& match_int(args
, &arg
))
629 if (test_opt(sbi
, RESERVE_ROOT
)) {
630 f2fs_info(sbi
, "Preserve previous reserve_root=%u",
631 F2FS_OPTION(sbi
).root_reserved_blocks
);
633 F2FS_OPTION(sbi
).root_reserved_blocks
= arg
;
634 set_opt(sbi
, RESERVE_ROOT
);
638 if (args
->from
&& match_int(args
, &arg
))
640 uid
= make_kuid(current_user_ns(), arg
);
641 if (!uid_valid(uid
)) {
642 f2fs_err(sbi
, "Invalid uid value %d", arg
);
645 F2FS_OPTION(sbi
).s_resuid
= uid
;
648 if (args
->from
&& match_int(args
, &arg
))
650 gid
= make_kgid(current_user_ns(), arg
);
651 if (!gid_valid(gid
)) {
652 f2fs_err(sbi
, "Invalid gid value %d", arg
);
655 F2FS_OPTION(sbi
).s_resgid
= gid
;
658 name
= match_strdup(&args
[0]);
662 if (!strcmp(name
, "adaptive")) {
663 if (f2fs_sb_has_blkzoned(sbi
)) {
664 f2fs_warn(sbi
, "adaptive mode is not allowed with zoned block device feature");
668 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_ADAPTIVE
;
669 } else if (!strcmp(name
, "lfs")) {
670 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_LFS
;
677 case Opt_io_size_bits
:
678 if (args
->from
&& match_int(args
, &arg
))
680 if (arg
<= 0 || arg
> __ilog2_u32(BIO_MAX_PAGES
)) {
681 f2fs_warn(sbi
, "Not support %d, larger than %d",
682 1 << arg
, BIO_MAX_PAGES
);
685 F2FS_OPTION(sbi
).write_io_size_bits
= arg
;
687 #ifdef CONFIG_F2FS_FAULT_INJECTION
688 case Opt_fault_injection
:
689 if (args
->from
&& match_int(args
, &arg
))
691 f2fs_build_fault_attr(sbi
, arg
, F2FS_ALL_FAULT_TYPE
);
692 set_opt(sbi
, FAULT_INJECTION
);
696 if (args
->from
&& match_int(args
, &arg
))
698 f2fs_build_fault_attr(sbi
, 0, arg
);
699 set_opt(sbi
, FAULT_INJECTION
);
702 case Opt_fault_injection
:
703 f2fs_info(sbi
, "fault_injection options not supported");
707 f2fs_info(sbi
, "fault_type options not supported");
711 sb
->s_flags
|= SB_LAZYTIME
;
714 sb
->s_flags
&= ~SB_LAZYTIME
;
719 set_opt(sbi
, USRQUOTA
);
722 set_opt(sbi
, GRPQUOTA
);
725 set_opt(sbi
, PRJQUOTA
);
728 ret
= f2fs_set_qf_name(sb
, USRQUOTA
, &args
[0]);
733 ret
= f2fs_set_qf_name(sb
, GRPQUOTA
, &args
[0]);
738 ret
= f2fs_set_qf_name(sb
, PRJQUOTA
, &args
[0]);
742 case Opt_offusrjquota
:
743 ret
= f2fs_clear_qf_name(sb
, USRQUOTA
);
747 case Opt_offgrpjquota
:
748 ret
= f2fs_clear_qf_name(sb
, GRPQUOTA
);
752 case Opt_offprjjquota
:
753 ret
= f2fs_clear_qf_name(sb
, PRJQUOTA
);
757 case Opt_jqfmt_vfsold
:
758 F2FS_OPTION(sbi
).s_jquota_fmt
= QFMT_VFS_OLD
;
760 case Opt_jqfmt_vfsv0
:
761 F2FS_OPTION(sbi
).s_jquota_fmt
= QFMT_VFS_V0
;
763 case Opt_jqfmt_vfsv1
:
764 F2FS_OPTION(sbi
).s_jquota_fmt
= QFMT_VFS_V1
;
767 clear_opt(sbi
, QUOTA
);
768 clear_opt(sbi
, USRQUOTA
);
769 clear_opt(sbi
, GRPQUOTA
);
770 clear_opt(sbi
, PRJQUOTA
);
780 case Opt_offusrjquota
:
781 case Opt_offgrpjquota
:
782 case Opt_offprjjquota
:
783 case Opt_jqfmt_vfsold
:
784 case Opt_jqfmt_vfsv0
:
785 case Opt_jqfmt_vfsv1
:
787 f2fs_info(sbi
, "quota operations not supported");
791 name
= match_strdup(&args
[0]);
794 if (!strcmp(name
, "user-based")) {
795 F2FS_OPTION(sbi
).whint_mode
= WHINT_MODE_USER
;
796 } else if (!strcmp(name
, "off")) {
797 F2FS_OPTION(sbi
).whint_mode
= WHINT_MODE_OFF
;
798 } else if (!strcmp(name
, "fs-based")) {
799 F2FS_OPTION(sbi
).whint_mode
= WHINT_MODE_FS
;
807 name
= match_strdup(&args
[0]);
811 if (!strcmp(name
, "default")) {
812 F2FS_OPTION(sbi
).alloc_mode
= ALLOC_MODE_DEFAULT
;
813 } else if (!strcmp(name
, "reuse")) {
814 F2FS_OPTION(sbi
).alloc_mode
= ALLOC_MODE_REUSE
;
822 name
= match_strdup(&args
[0]);
825 if (!strcmp(name
, "posix")) {
826 F2FS_OPTION(sbi
).fsync_mode
= FSYNC_MODE_POSIX
;
827 } else if (!strcmp(name
, "strict")) {
828 F2FS_OPTION(sbi
).fsync_mode
= FSYNC_MODE_STRICT
;
829 } else if (!strcmp(name
, "nobarrier")) {
830 F2FS_OPTION(sbi
).fsync_mode
=
831 FSYNC_MODE_NOBARRIER
;
838 case Opt_test_dummy_encryption
:
839 ret
= f2fs_set_test_dummy_encryption(sb
, p
, &args
[0],
844 case Opt_inlinecrypt
:
845 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
846 sb
->s_flags
|= SB_INLINECRYPT
;
848 f2fs_info(sbi
, "inline encryption not supported");
851 case Opt_checkpoint_disable_cap_perc
:
852 if (args
->from
&& match_int(args
, &arg
))
854 if (arg
< 0 || arg
> 100)
856 F2FS_OPTION(sbi
).unusable_cap_perc
= arg
;
857 set_opt(sbi
, DISABLE_CHECKPOINT
);
859 case Opt_checkpoint_disable_cap
:
860 if (args
->from
&& match_int(args
, &arg
))
862 F2FS_OPTION(sbi
).unusable_cap
= arg
;
863 set_opt(sbi
, DISABLE_CHECKPOINT
);
865 case Opt_checkpoint_disable
:
866 set_opt(sbi
, DISABLE_CHECKPOINT
);
868 case Opt_checkpoint_enable
:
869 clear_opt(sbi
, DISABLE_CHECKPOINT
);
871 #ifdef CONFIG_F2FS_FS_COMPRESSION
872 case Opt_compress_algorithm
:
873 if (!f2fs_sb_has_compression(sbi
)) {
874 f2fs_info(sbi
, "Image doesn't support compression");
877 name
= match_strdup(&args
[0]);
880 if (!strcmp(name
, "lzo")) {
881 F2FS_OPTION(sbi
).compress_algorithm
=
883 } else if (!strcmp(name
, "lz4")) {
884 F2FS_OPTION(sbi
).compress_algorithm
=
886 } else if (!strcmp(name
, "zstd")) {
887 F2FS_OPTION(sbi
).compress_algorithm
=
889 } else if (!strcmp(name
, "lzo-rle")) {
890 F2FS_OPTION(sbi
).compress_algorithm
=
898 case Opt_compress_log_size
:
899 if (!f2fs_sb_has_compression(sbi
)) {
900 f2fs_info(sbi
, "Image doesn't support compression");
903 if (args
->from
&& match_int(args
, &arg
))
905 if (arg
< MIN_COMPRESS_LOG_SIZE
||
906 arg
> MAX_COMPRESS_LOG_SIZE
) {
908 "Compress cluster log size is out of range");
911 F2FS_OPTION(sbi
).compress_log_size
= arg
;
913 case Opt_compress_extension
:
914 if (!f2fs_sb_has_compression(sbi
)) {
915 f2fs_info(sbi
, "Image doesn't support compression");
918 name
= match_strdup(&args
[0]);
922 ext
= F2FS_OPTION(sbi
).extensions
;
923 ext_cnt
= F2FS_OPTION(sbi
).compress_ext_cnt
;
925 if (strlen(name
) >= F2FS_EXTENSION_LEN
||
926 ext_cnt
>= COMPRESS_EXT_NUM
) {
928 "invalid extension length/number");
933 strcpy(ext
[ext_cnt
], name
);
934 F2FS_OPTION(sbi
).compress_ext_cnt
++;
938 case Opt_compress_algorithm
:
939 case Opt_compress_log_size
:
940 case Opt_compress_extension
:
941 f2fs_info(sbi
, "compression options not supported");
948 f2fs_err(sbi
, "Unrecognized mount option \"%s\" or missing value",
954 if (f2fs_check_quota_options(sbi
))
957 if (f2fs_sb_has_quota_ino(sbi
) && !f2fs_readonly(sbi
->sb
)) {
958 f2fs_info(sbi
, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
961 if (f2fs_sb_has_project_quota(sbi
) && !f2fs_readonly(sbi
->sb
)) {
962 f2fs_err(sbi
, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
966 #ifndef CONFIG_UNICODE
967 if (f2fs_sb_has_casefold(sbi
)) {
969 "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
974 * The BLKZONED feature indicates that the drive was formatted with
975 * zone alignment optimization. This is optional for host-aware
976 * devices, but mandatory for host-managed zoned block devices.
978 #ifndef CONFIG_BLK_DEV_ZONED
979 if (f2fs_sb_has_blkzoned(sbi
)) {
980 f2fs_err(sbi
, "Zoned block device support is not enabled");
985 if (F2FS_IO_SIZE_BITS(sbi
) && !f2fs_lfs_mode(sbi
)) {
986 f2fs_err(sbi
, "Should set mode=lfs with %uKB-sized IO",
987 F2FS_IO_SIZE_KB(sbi
));
991 if (test_opt(sbi
, INLINE_XATTR_SIZE
)) {
992 int min_size
, max_size
;
994 if (!f2fs_sb_has_extra_attr(sbi
) ||
995 !f2fs_sb_has_flexible_inline_xattr(sbi
)) {
996 f2fs_err(sbi
, "extra_attr or flexible_inline_xattr feature is off");
999 if (!test_opt(sbi
, INLINE_XATTR
)) {
1000 f2fs_err(sbi
, "inline_xattr_size option should be set with inline_xattr option");
1004 min_size
= sizeof(struct f2fs_xattr_header
) / sizeof(__le32
);
1005 max_size
= MAX_INLINE_XATTR_SIZE
;
1007 if (F2FS_OPTION(sbi
).inline_xattr_size
< min_size
||
1008 F2FS_OPTION(sbi
).inline_xattr_size
> max_size
) {
1009 f2fs_err(sbi
, "inline xattr size is out of range: %d ~ %d",
1010 min_size
, max_size
);
1015 if (test_opt(sbi
, DISABLE_CHECKPOINT
) && f2fs_lfs_mode(sbi
)) {
1016 f2fs_err(sbi
, "LFS not compatible with checkpoint=disable\n");
1020 /* Not pass down write hints if the number of active logs is lesser
1021 * than NR_CURSEG_PERSIST_TYPE.
1023 if (F2FS_OPTION(sbi
).active_logs
!= NR_CURSEG_TYPE
)
1024 F2FS_OPTION(sbi
).whint_mode
= WHINT_MODE_OFF
;
1028 static struct inode
*f2fs_alloc_inode(struct super_block
*sb
)
1030 struct f2fs_inode_info
*fi
;
1032 fi
= kmem_cache_alloc(f2fs_inode_cachep
, GFP_F2FS_ZERO
);
1036 init_once((void *) fi
);
1038 /* Initialize f2fs-specific inode info */
1039 atomic_set(&fi
->dirty_pages
, 0);
1040 atomic_set(&fi
->i_compr_blocks
, 0);
1041 init_rwsem(&fi
->i_sem
);
1042 spin_lock_init(&fi
->i_size_lock
);
1043 INIT_LIST_HEAD(&fi
->dirty_list
);
1044 INIT_LIST_HEAD(&fi
->gdirty_list
);
1045 INIT_LIST_HEAD(&fi
->inmem_ilist
);
1046 INIT_LIST_HEAD(&fi
->inmem_pages
);
1047 mutex_init(&fi
->inmem_lock
);
1048 init_rwsem(&fi
->i_gc_rwsem
[READ
]);
1049 init_rwsem(&fi
->i_gc_rwsem
[WRITE
]);
1050 init_rwsem(&fi
->i_mmap_sem
);
1051 init_rwsem(&fi
->i_xattr_sem
);
1053 /* Will be used by directory only */
1054 fi
->i_dir_level
= F2FS_SB(sb
)->dir_level
;
1058 return &fi
->vfs_inode
;
1061 static int f2fs_drop_inode(struct inode
*inode
)
1063 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1067 * during filesystem shutdown, if checkpoint is disabled,
1068 * drop useless meta/node dirty pages.
1070 if (unlikely(is_sbi_flag_set(sbi
, SBI_CP_DISABLED
))) {
1071 if (inode
->i_ino
== F2FS_NODE_INO(sbi
) ||
1072 inode
->i_ino
== F2FS_META_INO(sbi
)) {
1073 trace_f2fs_drop_inode(inode
, 1);
1079 * This is to avoid a deadlock condition like below.
1080 * writeback_single_inode(inode)
1081 * - f2fs_write_data_page
1082 * - f2fs_gc -> iput -> evict
1083 * - inode_wait_for_writeback(inode)
1085 if ((!inode_unhashed(inode
) && inode
->i_state
& I_SYNC
)) {
1086 if (!inode
->i_nlink
&& !is_bad_inode(inode
)) {
1087 /* to avoid evict_inode call simultaneously */
1088 atomic_inc(&inode
->i_count
);
1089 spin_unlock(&inode
->i_lock
);
1091 /* some remained atomic pages should discarded */
1092 if (f2fs_is_atomic_file(inode
))
1093 f2fs_drop_inmem_pages(inode
);
1095 /* should remain fi->extent_tree for writepage */
1096 f2fs_destroy_extent_node(inode
);
1098 sb_start_intwrite(inode
->i_sb
);
1099 f2fs_i_size_write(inode
, 0);
1101 f2fs_submit_merged_write_cond(F2FS_I_SB(inode
),
1102 inode
, NULL
, 0, DATA
);
1103 truncate_inode_pages_final(inode
->i_mapping
);
1105 if (F2FS_HAS_BLOCKS(inode
))
1106 f2fs_truncate(inode
);
1108 sb_end_intwrite(inode
->i_sb
);
1110 spin_lock(&inode
->i_lock
);
1111 atomic_dec(&inode
->i_count
);
1113 trace_f2fs_drop_inode(inode
, 0);
1116 ret
= generic_drop_inode(inode
);
1118 ret
= fscrypt_drop_inode(inode
);
1119 trace_f2fs_drop_inode(inode
, ret
);
1123 int f2fs_inode_dirtied(struct inode
*inode
, bool sync
)
1125 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1128 spin_lock(&sbi
->inode_lock
[DIRTY_META
]);
1129 if (is_inode_flag_set(inode
, FI_DIRTY_INODE
)) {
1132 set_inode_flag(inode
, FI_DIRTY_INODE
);
1133 stat_inc_dirty_inode(sbi
, DIRTY_META
);
1135 if (sync
&& list_empty(&F2FS_I(inode
)->gdirty_list
)) {
1136 list_add_tail(&F2FS_I(inode
)->gdirty_list
,
1137 &sbi
->inode_list
[DIRTY_META
]);
1138 inc_page_count(sbi
, F2FS_DIRTY_IMETA
);
1140 spin_unlock(&sbi
->inode_lock
[DIRTY_META
]);
1144 void f2fs_inode_synced(struct inode
*inode
)
1146 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1148 spin_lock(&sbi
->inode_lock
[DIRTY_META
]);
1149 if (!is_inode_flag_set(inode
, FI_DIRTY_INODE
)) {
1150 spin_unlock(&sbi
->inode_lock
[DIRTY_META
]);
1153 if (!list_empty(&F2FS_I(inode
)->gdirty_list
)) {
1154 list_del_init(&F2FS_I(inode
)->gdirty_list
);
1155 dec_page_count(sbi
, F2FS_DIRTY_IMETA
);
1157 clear_inode_flag(inode
, FI_DIRTY_INODE
);
1158 clear_inode_flag(inode
, FI_AUTO_RECOVER
);
1159 stat_dec_dirty_inode(F2FS_I_SB(inode
), DIRTY_META
);
1160 spin_unlock(&sbi
->inode_lock
[DIRTY_META
]);
1164 * f2fs_dirty_inode() is called from __mark_inode_dirty()
1166 * We should call set_dirty_inode to write the dirty inode through write_inode.
1168 static void f2fs_dirty_inode(struct inode
*inode
, int flags
)
1170 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1172 if (inode
->i_ino
== F2FS_NODE_INO(sbi
) ||
1173 inode
->i_ino
== F2FS_META_INO(sbi
))
1176 if (flags
== I_DIRTY_TIME
)
1179 if (is_inode_flag_set(inode
, FI_AUTO_RECOVER
))
1180 clear_inode_flag(inode
, FI_AUTO_RECOVER
);
1182 f2fs_inode_dirtied(inode
, false);
1185 static void f2fs_free_inode(struct inode
*inode
)
1187 fscrypt_free_inode(inode
);
1188 kmem_cache_free(f2fs_inode_cachep
, F2FS_I(inode
));
1191 static void destroy_percpu_info(struct f2fs_sb_info
*sbi
)
1193 percpu_counter_destroy(&sbi
->alloc_valid_block_count
);
1194 percpu_counter_destroy(&sbi
->total_valid_inode_count
);
1197 static void destroy_device_list(struct f2fs_sb_info
*sbi
)
1201 for (i
= 0; i
< sbi
->s_ndevs
; i
++) {
1202 blkdev_put(FDEV(i
).bdev
, FMODE_EXCL
);
1203 #ifdef CONFIG_BLK_DEV_ZONED
1204 kvfree(FDEV(i
).blkz_seq
);
1205 kfree(FDEV(i
).zone_capacity_blocks
);
1211 static void f2fs_put_super(struct super_block
*sb
)
1213 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1217 /* unregister procfs/sysfs entries in advance to avoid race case */
1218 f2fs_unregister_sysfs(sbi
);
1220 f2fs_quota_off_umount(sb
);
1222 /* prevent remaining shrinker jobs */
1223 mutex_lock(&sbi
->umount_mutex
);
1226 * We don't need to do checkpoint when superblock is clean.
1227 * But, the previous checkpoint was not done by umount, it needs to do
1228 * clean checkpoint again.
1230 if ((is_sbi_flag_set(sbi
, SBI_IS_DIRTY
) ||
1231 !is_set_ckpt_flags(sbi
, CP_UMOUNT_FLAG
))) {
1232 struct cp_control cpc
= {
1233 .reason
= CP_UMOUNT
,
1235 f2fs_write_checkpoint(sbi
, &cpc
);
1238 /* be sure to wait for any on-going discard commands */
1239 dropped
= f2fs_issue_discard_timeout(sbi
);
1241 if ((f2fs_hw_support_discard(sbi
) || f2fs_hw_should_discard(sbi
)) &&
1242 !sbi
->discard_blks
&& !dropped
) {
1243 struct cp_control cpc
= {
1244 .reason
= CP_UMOUNT
| CP_TRIMMED
,
1246 f2fs_write_checkpoint(sbi
, &cpc
);
1250 * normally superblock is clean, so we need to release this.
1251 * In addition, EIO will skip do checkpoint, we need this as well.
1253 f2fs_release_ino_entry(sbi
, true);
1255 f2fs_leave_shrinker(sbi
);
1256 mutex_unlock(&sbi
->umount_mutex
);
1258 /* our cp_error case, we can wait for any writeback page */
1259 f2fs_flush_merged_writes(sbi
);
1261 f2fs_wait_on_all_pages(sbi
, F2FS_WB_CP_DATA
);
1263 f2fs_bug_on(sbi
, sbi
->fsync_node_num
);
1265 iput(sbi
->node_inode
);
1266 sbi
->node_inode
= NULL
;
1268 iput(sbi
->meta_inode
);
1269 sbi
->meta_inode
= NULL
;
1272 * iput() can update stat information, if f2fs_write_checkpoint()
1273 * above failed with error.
1275 f2fs_destroy_stats(sbi
);
1277 /* destroy f2fs internal modules */
1278 f2fs_destroy_node_manager(sbi
);
1279 f2fs_destroy_segment_manager(sbi
);
1281 f2fs_destroy_post_read_wq(sbi
);
1285 sb
->s_fs_info
= NULL
;
1286 if (sbi
->s_chksum_driver
)
1287 crypto_free_shash(sbi
->s_chksum_driver
);
1288 kfree(sbi
->raw_super
);
1290 destroy_device_list(sbi
);
1291 f2fs_destroy_page_array_cache(sbi
);
1292 f2fs_destroy_xattr_caches(sbi
);
1293 mempool_destroy(sbi
->write_io_dummy
);
1295 for (i
= 0; i
< MAXQUOTAS
; i
++)
1296 kfree(F2FS_OPTION(sbi
).s_qf_names
[i
]);
1298 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi
).dummy_enc_policy
);
1299 destroy_percpu_info(sbi
);
1300 for (i
= 0; i
< NR_PAGE_TYPE
; i
++)
1301 kvfree(sbi
->write_io
[i
]);
1302 #ifdef CONFIG_UNICODE
1303 utf8_unload(sb
->s_encoding
);
1308 int f2fs_sync_fs(struct super_block
*sb
, int sync
)
1310 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1313 if (unlikely(f2fs_cp_error(sbi
)))
1315 if (unlikely(is_sbi_flag_set(sbi
, SBI_CP_DISABLED
)))
1318 trace_f2fs_sync_fs(sb
, sync
);
1320 if (unlikely(is_sbi_flag_set(sbi
, SBI_POR_DOING
)))
1324 struct cp_control cpc
;
1326 cpc
.reason
= __get_cp_reason(sbi
);
1328 down_write(&sbi
->gc_lock
);
1329 err
= f2fs_write_checkpoint(sbi
, &cpc
);
1330 up_write(&sbi
->gc_lock
);
1332 f2fs_trace_ios(NULL
, 1);
1337 static int f2fs_freeze(struct super_block
*sb
)
1339 if (f2fs_readonly(sb
))
1342 /* IO error happened before */
1343 if (unlikely(f2fs_cp_error(F2FS_SB(sb
))))
1346 /* must be clean, since sync_filesystem() was already called */
1347 if (is_sbi_flag_set(F2FS_SB(sb
), SBI_IS_DIRTY
))
1352 static int f2fs_unfreeze(struct super_block
*sb
)
1358 static int f2fs_statfs_project(struct super_block
*sb
,
1359 kprojid_t projid
, struct kstatfs
*buf
)
1362 struct dquot
*dquot
;
1366 qid
= make_kqid_projid(projid
);
1367 dquot
= dqget(sb
, qid
);
1369 return PTR_ERR(dquot
);
1370 spin_lock(&dquot
->dq_dqb_lock
);
1372 limit
= min_not_zero(dquot
->dq_dqb
.dqb_bsoftlimit
,
1373 dquot
->dq_dqb
.dqb_bhardlimit
);
1375 limit
>>= sb
->s_blocksize_bits
;
1377 if (limit
&& buf
->f_blocks
> limit
) {
1378 curblock
= (dquot
->dq_dqb
.dqb_curspace
+
1379 dquot
->dq_dqb
.dqb_rsvspace
) >> sb
->s_blocksize_bits
;
1380 buf
->f_blocks
= limit
;
1381 buf
->f_bfree
= buf
->f_bavail
=
1382 (buf
->f_blocks
> curblock
) ?
1383 (buf
->f_blocks
- curblock
) : 0;
1386 limit
= min_not_zero(dquot
->dq_dqb
.dqb_isoftlimit
,
1387 dquot
->dq_dqb
.dqb_ihardlimit
);
1389 if (limit
&& buf
->f_files
> limit
) {
1390 buf
->f_files
= limit
;
1392 (buf
->f_files
> dquot
->dq_dqb
.dqb_curinodes
) ?
1393 (buf
->f_files
- dquot
->dq_dqb
.dqb_curinodes
) : 0;
1396 spin_unlock(&dquot
->dq_dqb_lock
);
1402 static int f2fs_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
1404 struct super_block
*sb
= dentry
->d_sb
;
1405 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1406 u64 id
= huge_encode_dev(sb
->s_bdev
->bd_dev
);
1407 block_t total_count
, user_block_count
, start_count
;
1408 u64 avail_node_count
;
1410 total_count
= le64_to_cpu(sbi
->raw_super
->block_count
);
1411 user_block_count
= sbi
->user_block_count
;
1412 start_count
= le32_to_cpu(sbi
->raw_super
->segment0_blkaddr
);
1413 buf
->f_type
= F2FS_SUPER_MAGIC
;
1414 buf
->f_bsize
= sbi
->blocksize
;
1416 buf
->f_blocks
= total_count
- start_count
;
1417 buf
->f_bfree
= user_block_count
- valid_user_blocks(sbi
) -
1418 sbi
->current_reserved_blocks
;
1420 spin_lock(&sbi
->stat_lock
);
1421 if (unlikely(buf
->f_bfree
<= sbi
->unusable_block_count
))
1424 buf
->f_bfree
-= sbi
->unusable_block_count
;
1425 spin_unlock(&sbi
->stat_lock
);
1427 if (buf
->f_bfree
> F2FS_OPTION(sbi
).root_reserved_blocks
)
1428 buf
->f_bavail
= buf
->f_bfree
-
1429 F2FS_OPTION(sbi
).root_reserved_blocks
;
1433 avail_node_count
= sbi
->total_node_count
- F2FS_RESERVED_NODE_NUM
;
1435 if (avail_node_count
> user_block_count
) {
1436 buf
->f_files
= user_block_count
;
1437 buf
->f_ffree
= buf
->f_bavail
;
1439 buf
->f_files
= avail_node_count
;
1440 buf
->f_ffree
= min(avail_node_count
- valid_node_count(sbi
),
1444 buf
->f_namelen
= F2FS_NAME_LEN
;
1445 buf
->f_fsid
= u64_to_fsid(id
);
1448 if (is_inode_flag_set(dentry
->d_inode
, FI_PROJ_INHERIT
) &&
1449 sb_has_quota_limits_enabled(sb
, PRJQUOTA
)) {
1450 f2fs_statfs_project(sb
, F2FS_I(dentry
->d_inode
)->i_projid
, buf
);
1456 static inline void f2fs_show_quota_options(struct seq_file
*seq
,
1457 struct super_block
*sb
)
1460 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1462 if (F2FS_OPTION(sbi
).s_jquota_fmt
) {
1465 switch (F2FS_OPTION(sbi
).s_jquota_fmt
) {
1476 seq_printf(seq
, ",jqfmt=%s", fmtname
);
1479 if (F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
])
1480 seq_show_option(seq
, "usrjquota",
1481 F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
]);
1483 if (F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
])
1484 seq_show_option(seq
, "grpjquota",
1485 F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
]);
1487 if (F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
])
1488 seq_show_option(seq
, "prjjquota",
1489 F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
]);
1493 static inline void f2fs_show_compress_options(struct seq_file
*seq
,
1494 struct super_block
*sb
)
1496 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1500 if (!f2fs_sb_has_compression(sbi
))
1503 switch (F2FS_OPTION(sbi
).compress_algorithm
) {
1513 case COMPRESS_LZORLE
:
1514 algtype
= "lzo-rle";
1517 seq_printf(seq
, ",compress_algorithm=%s", algtype
);
1519 seq_printf(seq
, ",compress_log_size=%u",
1520 F2FS_OPTION(sbi
).compress_log_size
);
1522 for (i
= 0; i
< F2FS_OPTION(sbi
).compress_ext_cnt
; i
++) {
1523 seq_printf(seq
, ",compress_extension=%s",
1524 F2FS_OPTION(sbi
).extensions
[i
]);
1528 static int f2fs_show_options(struct seq_file
*seq
, struct dentry
*root
)
1530 struct f2fs_sb_info
*sbi
= F2FS_SB(root
->d_sb
);
1532 if (F2FS_OPTION(sbi
).bggc_mode
== BGGC_MODE_SYNC
)
1533 seq_printf(seq
, ",background_gc=%s", "sync");
1534 else if (F2FS_OPTION(sbi
).bggc_mode
== BGGC_MODE_ON
)
1535 seq_printf(seq
, ",background_gc=%s", "on");
1536 else if (F2FS_OPTION(sbi
).bggc_mode
== BGGC_MODE_OFF
)
1537 seq_printf(seq
, ",background_gc=%s", "off");
1539 if (test_opt(sbi
, DISABLE_ROLL_FORWARD
))
1540 seq_puts(seq
, ",disable_roll_forward");
1541 if (test_opt(sbi
, NORECOVERY
))
1542 seq_puts(seq
, ",norecovery");
1543 if (test_opt(sbi
, DISCARD
))
1544 seq_puts(seq
, ",discard");
1546 seq_puts(seq
, ",nodiscard");
1547 if (test_opt(sbi
, NOHEAP
))
1548 seq_puts(seq
, ",no_heap");
1550 seq_puts(seq
, ",heap");
1551 #ifdef CONFIG_F2FS_FS_XATTR
1552 if (test_opt(sbi
, XATTR_USER
))
1553 seq_puts(seq
, ",user_xattr");
1555 seq_puts(seq
, ",nouser_xattr");
1556 if (test_opt(sbi
, INLINE_XATTR
))
1557 seq_puts(seq
, ",inline_xattr");
1559 seq_puts(seq
, ",noinline_xattr");
1560 if (test_opt(sbi
, INLINE_XATTR_SIZE
))
1561 seq_printf(seq
, ",inline_xattr_size=%u",
1562 F2FS_OPTION(sbi
).inline_xattr_size
);
1564 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1565 if (test_opt(sbi
, POSIX_ACL
))
1566 seq_puts(seq
, ",acl");
1568 seq_puts(seq
, ",noacl");
1570 if (test_opt(sbi
, DISABLE_EXT_IDENTIFY
))
1571 seq_puts(seq
, ",disable_ext_identify");
1572 if (test_opt(sbi
, INLINE_DATA
))
1573 seq_puts(seq
, ",inline_data");
1575 seq_puts(seq
, ",noinline_data");
1576 if (test_opt(sbi
, INLINE_DENTRY
))
1577 seq_puts(seq
, ",inline_dentry");
1579 seq_puts(seq
, ",noinline_dentry");
1580 if (!f2fs_readonly(sbi
->sb
) && test_opt(sbi
, FLUSH_MERGE
))
1581 seq_puts(seq
, ",flush_merge");
1582 if (test_opt(sbi
, NOBARRIER
))
1583 seq_puts(seq
, ",nobarrier");
1584 if (test_opt(sbi
, FASTBOOT
))
1585 seq_puts(seq
, ",fastboot");
1586 if (test_opt(sbi
, EXTENT_CACHE
))
1587 seq_puts(seq
, ",extent_cache");
1589 seq_puts(seq
, ",noextent_cache");
1590 if (test_opt(sbi
, DATA_FLUSH
))
1591 seq_puts(seq
, ",data_flush");
1593 seq_puts(seq
, ",mode=");
1594 if (F2FS_OPTION(sbi
).fs_mode
== FS_MODE_ADAPTIVE
)
1595 seq_puts(seq
, "adaptive");
1596 else if (F2FS_OPTION(sbi
).fs_mode
== FS_MODE_LFS
)
1597 seq_puts(seq
, "lfs");
1598 seq_printf(seq
, ",active_logs=%u", F2FS_OPTION(sbi
).active_logs
);
1599 if (test_opt(sbi
, RESERVE_ROOT
))
1600 seq_printf(seq
, ",reserve_root=%u,resuid=%u,resgid=%u",
1601 F2FS_OPTION(sbi
).root_reserved_blocks
,
1602 from_kuid_munged(&init_user_ns
,
1603 F2FS_OPTION(sbi
).s_resuid
),
1604 from_kgid_munged(&init_user_ns
,
1605 F2FS_OPTION(sbi
).s_resgid
));
1606 if (F2FS_IO_SIZE_BITS(sbi
))
1607 seq_printf(seq
, ",io_bits=%u",
1608 F2FS_OPTION(sbi
).write_io_size_bits
);
1609 #ifdef CONFIG_F2FS_FAULT_INJECTION
1610 if (test_opt(sbi
, FAULT_INJECTION
)) {
1611 seq_printf(seq
, ",fault_injection=%u",
1612 F2FS_OPTION(sbi
).fault_info
.inject_rate
);
1613 seq_printf(seq
, ",fault_type=%u",
1614 F2FS_OPTION(sbi
).fault_info
.inject_type
);
1618 if (test_opt(sbi
, QUOTA
))
1619 seq_puts(seq
, ",quota");
1620 if (test_opt(sbi
, USRQUOTA
))
1621 seq_puts(seq
, ",usrquota");
1622 if (test_opt(sbi
, GRPQUOTA
))
1623 seq_puts(seq
, ",grpquota");
1624 if (test_opt(sbi
, PRJQUOTA
))
1625 seq_puts(seq
, ",prjquota");
1627 f2fs_show_quota_options(seq
, sbi
->sb
);
1628 if (F2FS_OPTION(sbi
).whint_mode
== WHINT_MODE_USER
)
1629 seq_printf(seq
, ",whint_mode=%s", "user-based");
1630 else if (F2FS_OPTION(sbi
).whint_mode
== WHINT_MODE_FS
)
1631 seq_printf(seq
, ",whint_mode=%s", "fs-based");
1633 fscrypt_show_test_dummy_encryption(seq
, ',', sbi
->sb
);
1635 if (sbi
->sb
->s_flags
& SB_INLINECRYPT
)
1636 seq_puts(seq
, ",inlinecrypt");
1638 if (F2FS_OPTION(sbi
).alloc_mode
== ALLOC_MODE_DEFAULT
)
1639 seq_printf(seq
, ",alloc_mode=%s", "default");
1640 else if (F2FS_OPTION(sbi
).alloc_mode
== ALLOC_MODE_REUSE
)
1641 seq_printf(seq
, ",alloc_mode=%s", "reuse");
1643 if (test_opt(sbi
, DISABLE_CHECKPOINT
))
1644 seq_printf(seq
, ",checkpoint=disable:%u",
1645 F2FS_OPTION(sbi
).unusable_cap
);
1646 if (F2FS_OPTION(sbi
).fsync_mode
== FSYNC_MODE_POSIX
)
1647 seq_printf(seq
, ",fsync_mode=%s", "posix");
1648 else if (F2FS_OPTION(sbi
).fsync_mode
== FSYNC_MODE_STRICT
)
1649 seq_printf(seq
, ",fsync_mode=%s", "strict");
1650 else if (F2FS_OPTION(sbi
).fsync_mode
== FSYNC_MODE_NOBARRIER
)
1651 seq_printf(seq
, ",fsync_mode=%s", "nobarrier");
1653 #ifdef CONFIG_F2FS_FS_COMPRESSION
1654 f2fs_show_compress_options(seq
, sbi
->sb
);
1657 if (test_opt(sbi
, ATGC
))
1658 seq_puts(seq
, ",atgc");
1662 static void default_options(struct f2fs_sb_info
*sbi
)
1664 /* init some FS parameters */
1665 F2FS_OPTION(sbi
).active_logs
= NR_CURSEG_PERSIST_TYPE
;
1666 F2FS_OPTION(sbi
).inline_xattr_size
= DEFAULT_INLINE_XATTR_ADDRS
;
1667 F2FS_OPTION(sbi
).whint_mode
= WHINT_MODE_OFF
;
1668 F2FS_OPTION(sbi
).alloc_mode
= ALLOC_MODE_DEFAULT
;
1669 F2FS_OPTION(sbi
).fsync_mode
= FSYNC_MODE_POSIX
;
1670 F2FS_OPTION(sbi
).s_resuid
= make_kuid(&init_user_ns
, F2FS_DEF_RESUID
);
1671 F2FS_OPTION(sbi
).s_resgid
= make_kgid(&init_user_ns
, F2FS_DEF_RESGID
);
1672 F2FS_OPTION(sbi
).compress_algorithm
= COMPRESS_LZ4
;
1673 F2FS_OPTION(sbi
).compress_log_size
= MIN_COMPRESS_LOG_SIZE
;
1674 F2FS_OPTION(sbi
).compress_ext_cnt
= 0;
1675 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_ON
;
1677 sbi
->sb
->s_flags
&= ~SB_INLINECRYPT
;
1679 set_opt(sbi
, INLINE_XATTR
);
1680 set_opt(sbi
, INLINE_DATA
);
1681 set_opt(sbi
, INLINE_DENTRY
);
1682 set_opt(sbi
, EXTENT_CACHE
);
1683 set_opt(sbi
, NOHEAP
);
1684 clear_opt(sbi
, DISABLE_CHECKPOINT
);
1685 F2FS_OPTION(sbi
).unusable_cap
= 0;
1686 sbi
->sb
->s_flags
|= SB_LAZYTIME
;
1687 set_opt(sbi
, FLUSH_MERGE
);
1688 set_opt(sbi
, DISCARD
);
1689 if (f2fs_sb_has_blkzoned(sbi
))
1690 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_LFS
;
1692 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_ADAPTIVE
;
1694 #ifdef CONFIG_F2FS_FS_XATTR
1695 set_opt(sbi
, XATTR_USER
);
1697 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1698 set_opt(sbi
, POSIX_ACL
);
1701 f2fs_build_fault_attr(sbi
, 0, 0);
1705 static int f2fs_enable_quotas(struct super_block
*sb
);
1708 static int f2fs_disable_checkpoint(struct f2fs_sb_info
*sbi
)
1710 unsigned int s_flags
= sbi
->sb
->s_flags
;
1711 struct cp_control cpc
;
1716 if (s_flags
& SB_RDONLY
) {
1717 f2fs_err(sbi
, "checkpoint=disable on readonly fs");
1720 sbi
->sb
->s_flags
|= SB_ACTIVE
;
1722 f2fs_update_time(sbi
, DISABLE_TIME
);
1724 while (!f2fs_time_over(sbi
, DISABLE_TIME
)) {
1725 down_write(&sbi
->gc_lock
);
1726 err
= f2fs_gc(sbi
, true, false, NULL_SEGNO
);
1727 if (err
== -ENODATA
) {
1731 if (err
&& err
!= -EAGAIN
)
1735 ret
= sync_filesystem(sbi
->sb
);
1737 err
= ret
? ret
: err
;
1741 unusable
= f2fs_get_unusable_blocks(sbi
);
1742 if (f2fs_disable_cp_again(sbi
, unusable
)) {
1747 down_write(&sbi
->gc_lock
);
1748 cpc
.reason
= CP_PAUSE
;
1749 set_sbi_flag(sbi
, SBI_CP_DISABLED
);
1750 err
= f2fs_write_checkpoint(sbi
, &cpc
);
1754 spin_lock(&sbi
->stat_lock
);
1755 sbi
->unusable_block_count
= unusable
;
1756 spin_unlock(&sbi
->stat_lock
);
1759 up_write(&sbi
->gc_lock
);
1761 sbi
->sb
->s_flags
= s_flags
; /* Restore SB_RDONLY status */
1765 static void f2fs_enable_checkpoint(struct f2fs_sb_info
*sbi
)
1767 down_write(&sbi
->gc_lock
);
1768 f2fs_dirty_to_prefree(sbi
);
1770 clear_sbi_flag(sbi
, SBI_CP_DISABLED
);
1771 set_sbi_flag(sbi
, SBI_IS_DIRTY
);
1772 up_write(&sbi
->gc_lock
);
1774 f2fs_sync_fs(sbi
->sb
, 1);
1777 static int f2fs_remount(struct super_block
*sb
, int *flags
, char *data
)
1779 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1780 struct f2fs_mount_info org_mount_opt
;
1781 unsigned long old_sb_flags
;
1783 bool need_restart_gc
= false;
1784 bool need_stop_gc
= false;
1785 bool no_extent_cache
= !test_opt(sbi
, EXTENT_CACHE
);
1786 bool disable_checkpoint
= test_opt(sbi
, DISABLE_CHECKPOINT
);
1787 bool no_io_align
= !F2FS_IO_ALIGNED(sbi
);
1788 bool no_atgc
= !test_opt(sbi
, ATGC
);
1789 bool checkpoint_changed
;
1795 * Save the old mount options in case we
1796 * need to restore them.
1798 org_mount_opt
= sbi
->mount_opt
;
1799 old_sb_flags
= sb
->s_flags
;
1802 org_mount_opt
.s_jquota_fmt
= F2FS_OPTION(sbi
).s_jquota_fmt
;
1803 for (i
= 0; i
< MAXQUOTAS
; i
++) {
1804 if (F2FS_OPTION(sbi
).s_qf_names
[i
]) {
1805 org_mount_opt
.s_qf_names
[i
] =
1806 kstrdup(F2FS_OPTION(sbi
).s_qf_names
[i
],
1808 if (!org_mount_opt
.s_qf_names
[i
]) {
1809 for (j
= 0; j
< i
; j
++)
1810 kfree(org_mount_opt
.s_qf_names
[j
]);
1814 org_mount_opt
.s_qf_names
[i
] = NULL
;
1819 /* recover superblocks we couldn't write due to previous RO mount */
1820 if (!(*flags
& SB_RDONLY
) && is_sbi_flag_set(sbi
, SBI_NEED_SB_WRITE
)) {
1821 err
= f2fs_commit_super(sbi
, false);
1822 f2fs_info(sbi
, "Try to recover all the superblocks, ret: %d",
1825 clear_sbi_flag(sbi
, SBI_NEED_SB_WRITE
);
1828 default_options(sbi
);
1830 /* parse mount options */
1831 err
= parse_options(sb
, data
, true);
1834 checkpoint_changed
=
1835 disable_checkpoint
!= test_opt(sbi
, DISABLE_CHECKPOINT
);
1838 * Previous and new state of filesystem is RO,
1839 * so skip checking GC and FLUSH_MERGE conditions.
1841 if (f2fs_readonly(sb
) && (*flags
& SB_RDONLY
))
1845 if (!f2fs_readonly(sb
) && (*flags
& SB_RDONLY
)) {
1846 err
= dquot_suspend(sb
, -1);
1849 } else if (f2fs_readonly(sb
) && !(*flags
& SB_RDONLY
)) {
1850 /* dquot_resume needs RW */
1851 sb
->s_flags
&= ~SB_RDONLY
;
1852 if (sb_any_quota_suspended(sb
)) {
1853 dquot_resume(sb
, -1);
1854 } else if (f2fs_sb_has_quota_ino(sbi
)) {
1855 err
= f2fs_enable_quotas(sb
);
1861 /* disallow enable atgc dynamically */
1862 if (no_atgc
== !!test_opt(sbi
, ATGC
)) {
1864 f2fs_warn(sbi
, "switch atgc option is not allowed");
1868 /* disallow enable/disable extent_cache dynamically */
1869 if (no_extent_cache
== !!test_opt(sbi
, EXTENT_CACHE
)) {
1871 f2fs_warn(sbi
, "switch extent_cache option is not allowed");
1875 if (no_io_align
== !!F2FS_IO_ALIGNED(sbi
)) {
1877 f2fs_warn(sbi
, "switch io_bits option is not allowed");
1881 if ((*flags
& SB_RDONLY
) && test_opt(sbi
, DISABLE_CHECKPOINT
)) {
1883 f2fs_warn(sbi
, "disabling checkpoint not compatible with read-only");
1888 * We stop the GC thread if FS is mounted as RO
1889 * or if background_gc = off is passed in mount
1890 * option. Also sync the filesystem.
1892 if ((*flags
& SB_RDONLY
) ||
1893 F2FS_OPTION(sbi
).bggc_mode
== BGGC_MODE_OFF
) {
1894 if (sbi
->gc_thread
) {
1895 f2fs_stop_gc_thread(sbi
);
1896 need_restart_gc
= true;
1898 } else if (!sbi
->gc_thread
) {
1899 err
= f2fs_start_gc_thread(sbi
);
1902 need_stop_gc
= true;
1905 if (*flags
& SB_RDONLY
||
1906 F2FS_OPTION(sbi
).whint_mode
!= org_mount_opt
.whint_mode
) {
1907 writeback_inodes_sb(sb
, WB_REASON_SYNC
);
1910 set_sbi_flag(sbi
, SBI_IS_DIRTY
);
1911 set_sbi_flag(sbi
, SBI_IS_CLOSE
);
1912 f2fs_sync_fs(sb
, 1);
1913 clear_sbi_flag(sbi
, SBI_IS_CLOSE
);
1916 if (checkpoint_changed
) {
1917 if (test_opt(sbi
, DISABLE_CHECKPOINT
)) {
1918 err
= f2fs_disable_checkpoint(sbi
);
1922 f2fs_enable_checkpoint(sbi
);
1927 * We stop issue flush thread if FS is mounted as RO
1928 * or if flush_merge is not passed in mount option.
1930 if ((*flags
& SB_RDONLY
) || !test_opt(sbi
, FLUSH_MERGE
)) {
1931 clear_opt(sbi
, FLUSH_MERGE
);
1932 f2fs_destroy_flush_cmd_control(sbi
, false);
1934 err
= f2fs_create_flush_cmd_control(sbi
);
1940 /* Release old quota file names */
1941 for (i
= 0; i
< MAXQUOTAS
; i
++)
1942 kfree(org_mount_opt
.s_qf_names
[i
]);
1944 /* Update the POSIXACL Flag */
1945 sb
->s_flags
= (sb
->s_flags
& ~SB_POSIXACL
) |
1946 (test_opt(sbi
, POSIX_ACL
) ? SB_POSIXACL
: 0);
1948 limit_reserve_root(sbi
);
1949 adjust_unusable_cap_perc(sbi
);
1950 *flags
= (*flags
& ~SB_LAZYTIME
) | (sb
->s_flags
& SB_LAZYTIME
);
1953 if (need_restart_gc
) {
1954 if (f2fs_start_gc_thread(sbi
))
1955 f2fs_warn(sbi
, "background gc thread has stopped");
1956 } else if (need_stop_gc
) {
1957 f2fs_stop_gc_thread(sbi
);
1961 F2FS_OPTION(sbi
).s_jquota_fmt
= org_mount_opt
.s_jquota_fmt
;
1962 for (i
= 0; i
< MAXQUOTAS
; i
++) {
1963 kfree(F2FS_OPTION(sbi
).s_qf_names
[i
]);
1964 F2FS_OPTION(sbi
).s_qf_names
[i
] = org_mount_opt
.s_qf_names
[i
];
1967 sbi
->mount_opt
= org_mount_opt
;
1968 sb
->s_flags
= old_sb_flags
;
1973 /* Read data from quotafile */
1974 static ssize_t
f2fs_quota_read(struct super_block
*sb
, int type
, char *data
,
1975 size_t len
, loff_t off
)
1977 struct inode
*inode
= sb_dqopt(sb
)->files
[type
];
1978 struct address_space
*mapping
= inode
->i_mapping
;
1979 block_t blkidx
= F2FS_BYTES_TO_BLK(off
);
1980 int offset
= off
& (sb
->s_blocksize
- 1);
1983 loff_t i_size
= i_size_read(inode
);
1990 if (off
+ len
> i_size
)
1993 while (toread
> 0) {
1994 tocopy
= min_t(unsigned long, sb
->s_blocksize
- offset
, toread
);
1996 page
= read_cache_page_gfp(mapping
, blkidx
, GFP_NOFS
);
1998 if (PTR_ERR(page
) == -ENOMEM
) {
1999 congestion_wait(BLK_RW_ASYNC
,
2000 DEFAULT_IO_TIMEOUT
);
2003 set_sbi_flag(F2FS_SB(sb
), SBI_QUOTA_NEED_REPAIR
);
2004 return PTR_ERR(page
);
2009 if (unlikely(page
->mapping
!= mapping
)) {
2010 f2fs_put_page(page
, 1);
2013 if (unlikely(!PageUptodate(page
))) {
2014 f2fs_put_page(page
, 1);
2015 set_sbi_flag(F2FS_SB(sb
), SBI_QUOTA_NEED_REPAIR
);
2019 kaddr
= kmap_atomic(page
);
2020 memcpy(data
, kaddr
+ offset
, tocopy
);
2021 kunmap_atomic(kaddr
);
2022 f2fs_put_page(page
, 1);
2032 /* Write to quotafile */
2033 static ssize_t
f2fs_quota_write(struct super_block
*sb
, int type
,
2034 const char *data
, size_t len
, loff_t off
)
2036 struct inode
*inode
= sb_dqopt(sb
)->files
[type
];
2037 struct address_space
*mapping
= inode
->i_mapping
;
2038 const struct address_space_operations
*a_ops
= mapping
->a_ops
;
2039 int offset
= off
& (sb
->s_blocksize
- 1);
2040 size_t towrite
= len
;
2042 void *fsdata
= NULL
;
2047 while (towrite
> 0) {
2048 tocopy
= min_t(unsigned long, sb
->s_blocksize
- offset
,
2051 err
= a_ops
->write_begin(NULL
, mapping
, off
, tocopy
, 0,
2053 if (unlikely(err
)) {
2054 if (err
== -ENOMEM
) {
2055 congestion_wait(BLK_RW_ASYNC
,
2056 DEFAULT_IO_TIMEOUT
);
2059 set_sbi_flag(F2FS_SB(sb
), SBI_QUOTA_NEED_REPAIR
);
2063 kaddr
= kmap_atomic(page
);
2064 memcpy(kaddr
+ offset
, data
, tocopy
);
2065 kunmap_atomic(kaddr
);
2066 flush_dcache_page(page
);
2068 a_ops
->write_end(NULL
, mapping
, off
, tocopy
, tocopy
,
2079 inode
->i_mtime
= inode
->i_ctime
= current_time(inode
);
2080 f2fs_mark_inode_dirty_sync(inode
, false);
2081 return len
- towrite
;
2084 static struct dquot
**f2fs_get_dquots(struct inode
*inode
)
2086 return F2FS_I(inode
)->i_dquot
;
2089 static qsize_t
*f2fs_get_reserved_space(struct inode
*inode
)
2091 return &F2FS_I(inode
)->i_reserved_quota
;
2094 static int f2fs_quota_on_mount(struct f2fs_sb_info
*sbi
, int type
)
2096 if (is_set_ckpt_flags(sbi
, CP_QUOTA_NEED_FSCK_FLAG
)) {
2097 f2fs_err(sbi
, "quota sysfile may be corrupted, skip loading it");
2101 return dquot_quota_on_mount(sbi
->sb
, F2FS_OPTION(sbi
).s_qf_names
[type
],
2102 F2FS_OPTION(sbi
).s_jquota_fmt
, type
);
2105 int f2fs_enable_quota_files(struct f2fs_sb_info
*sbi
, bool rdonly
)
2110 if (f2fs_sb_has_quota_ino(sbi
) && rdonly
) {
2111 err
= f2fs_enable_quotas(sbi
->sb
);
2113 f2fs_err(sbi
, "Cannot turn on quota_ino: %d", err
);
2119 for (i
= 0; i
< MAXQUOTAS
; i
++) {
2120 if (F2FS_OPTION(sbi
).s_qf_names
[i
]) {
2121 err
= f2fs_quota_on_mount(sbi
, i
);
2126 f2fs_err(sbi
, "Cannot turn on quotas: %d on %d",
2133 static int f2fs_quota_enable(struct super_block
*sb
, int type
, int format_id
,
2136 struct inode
*qf_inode
;
2137 unsigned long qf_inum
;
2140 BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb
)));
2142 qf_inum
= f2fs_qf_ino(sb
, type
);
2146 qf_inode
= f2fs_iget(sb
, qf_inum
);
2147 if (IS_ERR(qf_inode
)) {
2148 f2fs_err(F2FS_SB(sb
), "Bad quota inode %u:%lu", type
, qf_inum
);
2149 return PTR_ERR(qf_inode
);
2152 /* Don't account quota for quota files to avoid recursion */
2153 qf_inode
->i_flags
|= S_NOQUOTA
;
2154 err
= dquot_load_quota_inode(qf_inode
, type
, format_id
, flags
);
2159 static int f2fs_enable_quotas(struct super_block
*sb
)
2161 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
2163 unsigned long qf_inum
;
2164 bool quota_mopt
[MAXQUOTAS
] = {
2165 test_opt(sbi
, USRQUOTA
),
2166 test_opt(sbi
, GRPQUOTA
),
2167 test_opt(sbi
, PRJQUOTA
),
2170 if (is_set_ckpt_flags(F2FS_SB(sb
), CP_QUOTA_NEED_FSCK_FLAG
)) {
2171 f2fs_err(sbi
, "quota file may be corrupted, skip loading it");
2175 sb_dqopt(sb
)->flags
|= DQUOT_QUOTA_SYS_FILE
;
2177 for (type
= 0; type
< MAXQUOTAS
; type
++) {
2178 qf_inum
= f2fs_qf_ino(sb
, type
);
2180 err
= f2fs_quota_enable(sb
, type
, QFMT_VFS_V1
,
2181 DQUOT_USAGE_ENABLED
|
2182 (quota_mopt
[type
] ? DQUOT_LIMITS_ENABLED
: 0));
2184 f2fs_err(sbi
, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2186 for (type
--; type
>= 0; type
--)
2187 dquot_quota_off(sb
, type
);
2188 set_sbi_flag(F2FS_SB(sb
),
2189 SBI_QUOTA_NEED_REPAIR
);
2197 int f2fs_quota_sync(struct super_block
*sb
, int type
)
2199 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
2200 struct quota_info
*dqopt
= sb_dqopt(sb
);
2207 * down_read(quota_sem)
2208 * dquot_writeback_dquots()
2211 * down_read(quota_sem)
2215 down_read(&sbi
->quota_sem
);
2216 ret
= dquot_writeback_dquots(sb
, type
);
2221 * Now when everything is written we can discard the pagecache so
2222 * that userspace sees the changes.
2224 for (cnt
= 0; cnt
< MAXQUOTAS
; cnt
++) {
2225 struct address_space
*mapping
;
2227 if (type
!= -1 && cnt
!= type
)
2229 if (!sb_has_quota_active(sb
, cnt
))
2232 mapping
= dqopt
->files
[cnt
]->i_mapping
;
2234 ret
= filemap_fdatawrite(mapping
);
2238 /* if we are using journalled quota */
2239 if (is_journalled_quota(sbi
))
2242 ret
= filemap_fdatawait(mapping
);
2244 set_sbi_flag(F2FS_SB(sb
), SBI_QUOTA_NEED_REPAIR
);
2246 inode_lock(dqopt
->files
[cnt
]);
2247 truncate_inode_pages(&dqopt
->files
[cnt
]->i_data
, 0);
2248 inode_unlock(dqopt
->files
[cnt
]);
2252 set_sbi_flag(F2FS_SB(sb
), SBI_QUOTA_NEED_REPAIR
);
2253 up_read(&sbi
->quota_sem
);
2254 f2fs_unlock_op(sbi
);
2258 static int f2fs_quota_on(struct super_block
*sb
, int type
, int format_id
,
2259 const struct path
*path
)
2261 struct inode
*inode
;
2264 /* if quota sysfile exists, deny enabling quota with specific file */
2265 if (f2fs_sb_has_quota_ino(F2FS_SB(sb
))) {
2266 f2fs_err(F2FS_SB(sb
), "quota sysfile already exists");
2270 err
= f2fs_quota_sync(sb
, type
);
2274 err
= dquot_quota_on(sb
, type
, format_id
, path
);
2278 inode
= d_inode(path
->dentry
);
2281 F2FS_I(inode
)->i_flags
|= F2FS_NOATIME_FL
| F2FS_IMMUTABLE_FL
;
2282 f2fs_set_inode_flags(inode
);
2283 inode_unlock(inode
);
2284 f2fs_mark_inode_dirty_sync(inode
, false);
2289 static int __f2fs_quota_off(struct super_block
*sb
, int type
)
2291 struct inode
*inode
= sb_dqopt(sb
)->files
[type
];
2294 if (!inode
|| !igrab(inode
))
2295 return dquot_quota_off(sb
, type
);
2297 err
= f2fs_quota_sync(sb
, type
);
2301 err
= dquot_quota_off(sb
, type
);
2302 if (err
|| f2fs_sb_has_quota_ino(F2FS_SB(sb
)))
2306 F2FS_I(inode
)->i_flags
&= ~(F2FS_NOATIME_FL
| F2FS_IMMUTABLE_FL
);
2307 f2fs_set_inode_flags(inode
);
2308 inode_unlock(inode
);
2309 f2fs_mark_inode_dirty_sync(inode
, false);
2315 static int f2fs_quota_off(struct super_block
*sb
, int type
)
2317 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
2320 err
= __f2fs_quota_off(sb
, type
);
2323 * quotactl can shutdown journalled quota, result in inconsistence
2324 * between quota record and fs data by following updates, tag the
2325 * flag to let fsck be aware of it.
2327 if (is_journalled_quota(sbi
))
2328 set_sbi_flag(sbi
, SBI_QUOTA_NEED_REPAIR
);
2332 void f2fs_quota_off_umount(struct super_block
*sb
)
2337 for (type
= 0; type
< MAXQUOTAS
; type
++) {
2338 err
= __f2fs_quota_off(sb
, type
);
2340 int ret
= dquot_quota_off(sb
, type
);
2342 f2fs_err(F2FS_SB(sb
), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2344 set_sbi_flag(F2FS_SB(sb
), SBI_QUOTA_NEED_REPAIR
);
2348 * In case of checkpoint=disable, we must flush quota blocks.
2349 * This can cause NULL exception for node_inode in end_io, since
2350 * put_super already dropped it.
2352 sync_filesystem(sb
);
2355 static void f2fs_truncate_quota_inode_pages(struct super_block
*sb
)
2357 struct quota_info
*dqopt
= sb_dqopt(sb
);
2360 for (type
= 0; type
< MAXQUOTAS
; type
++) {
2361 if (!dqopt
->files
[type
])
2363 f2fs_inode_synced(dqopt
->files
[type
]);
2367 static int f2fs_dquot_commit(struct dquot
*dquot
)
2369 struct f2fs_sb_info
*sbi
= F2FS_SB(dquot
->dq_sb
);
2372 down_read_nested(&sbi
->quota_sem
, SINGLE_DEPTH_NESTING
);
2373 ret
= dquot_commit(dquot
);
2375 set_sbi_flag(sbi
, SBI_QUOTA_NEED_REPAIR
);
2376 up_read(&sbi
->quota_sem
);
2380 static int f2fs_dquot_acquire(struct dquot
*dquot
)
2382 struct f2fs_sb_info
*sbi
= F2FS_SB(dquot
->dq_sb
);
2385 down_read(&sbi
->quota_sem
);
2386 ret
= dquot_acquire(dquot
);
2388 set_sbi_flag(sbi
, SBI_QUOTA_NEED_REPAIR
);
2389 up_read(&sbi
->quota_sem
);
2393 static int f2fs_dquot_release(struct dquot
*dquot
)
2395 struct f2fs_sb_info
*sbi
= F2FS_SB(dquot
->dq_sb
);
2396 int ret
= dquot_release(dquot
);
2399 set_sbi_flag(sbi
, SBI_QUOTA_NEED_REPAIR
);
2403 static int f2fs_dquot_mark_dquot_dirty(struct dquot
*dquot
)
2405 struct super_block
*sb
= dquot
->dq_sb
;
2406 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
2407 int ret
= dquot_mark_dquot_dirty(dquot
);
2409 /* if we are using journalled quota */
2410 if (is_journalled_quota(sbi
))
2411 set_sbi_flag(sbi
, SBI_QUOTA_NEED_FLUSH
);
2416 static int f2fs_dquot_commit_info(struct super_block
*sb
, int type
)
2418 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
2419 int ret
= dquot_commit_info(sb
, type
);
2422 set_sbi_flag(sbi
, SBI_QUOTA_NEED_REPAIR
);
2426 static int f2fs_get_projid(struct inode
*inode
, kprojid_t
*projid
)
2428 *projid
= F2FS_I(inode
)->i_projid
;
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space	= f2fs_get_reserved_space,
	.write_dquot		= f2fs_dquot_commit,
	.acquire_dquot		= f2fs_dquot_acquire,
	.release_dquot		= f2fs_dquot_release,
	.mark_dirty		= f2fs_dquot_mark_dquot_dirty,
	.write_info		= f2fs_dquot_commit_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= f2fs_get_projid,
	.get_next_id		= dquot_get_next_id,
};
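/*
 * The dquot helpers above all follow one pattern: delegate to the generic
 * dquot_* implementation under sbi->quota_sem and, on failure, tag the
 * superblock with SBI_QUOTA_NEED_REPAIR (or SBI_QUOTA_NEED_FLUSH in the
 * dirty-marking path) so the inconsistency is surfaced to fsck instead of
 * being silently lost.
 */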
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}
static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
{
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
}
static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}
static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(nid_t);
	*lblk_bits_ret = 8 * sizeof(block_t);
}
static int f2fs_get_num_devices(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (f2fs_is_multi_device(sbi))
		return sbi->s_ndevs;
	return 1;
}
static void f2fs_get_devices(struct super_block *sb,
			     struct request_queue **devs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		devs[i] = bdev_get_queue(FDEV(i).bdev);
}
static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix		= "f2fs:",
	.get_context		= f2fs_get_context,
	.set_context		= f2fs_set_context,
	.get_dummy_policy	= f2fs_get_dummy_policy,
	.empty_dir		= f2fs_empty_dir,
	.max_namelen		= F2FS_NAME_LEN,
	.has_stable_inodes	= f2fs_has_stable_inodes,
	.get_ino_and_lblk_bits	= f2fs_get_ino_and_lblk_bits,
	.get_num_devices	= f2fs_get_num_devices,
	.get_devices		= f2fs_get_devices,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry	= f2fs_fh_to_dentry,
	.fh_to_parent	= f2fs_fh_to_parent,
	.get_parent	= f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = DEF_ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
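/*
 * Rough numbers, assuming DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018
 * (the 4KB-block layout): 2 * 1018 + 2 * 1018^2 + 1018^3 is roughly
 * 1.06 billion blocks, i.e. a per-file limit of about 3.94 TiB coming from
 * the node tree alone.
 */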
static int __f2fs_commit_super(struct buffer_head *bh,
					struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
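/*
 * REQ_PREFLUSH flushes the device's volatile write cache before this write
 * and REQ_FUA forces the superblock write itself onto stable media, so a
 * committed superblock cannot be lost to a power failure. Superblock updates
 * are rare, so paying the FUA cost every time is acceptable.
 */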
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
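/*
 * The checks above encode the expected on-disk layout: the metadata areas
 * follow each other as CP -> SIT -> NAT -> SSA -> MAIN, each area starting
 * exactly where the previous one ends (start + (segments <<
 * log_blocks_per_seg)), and MAIN must not run past the end of the segment
 * space described by segment_count.
 */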
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
			  PAGE_SIZE);
		return -EFSCORRUPTED;
	}

	/* Currently, support only 4KB block size */
	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
			  le32_to_cpu(raw_super->log_blocksize),
			  F2FS_BLKSIZE_BITS);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			  segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
				  segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_info(sbi, "Insane cp_payload (%u > %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return -EFSCORRUPTED;

	return 0;
}
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_PERSIST_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
	init_rwsem(&sbi->pin_sem);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		percpu_counter_destroy(&sbi->alloc_valid_block_count);

	return err;
}
#ifdef CONFIG_BLK_DEV_ZONED

struct f2fs_report_zones_args {
	struct f2fs_dev_info *dev;
	bool zone_cap_mismatch;
};

static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	struct f2fs_report_zones_args *rz_args = data;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return 0;

	set_bit(idx, rz_args->dev->blkz_seq);
	rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
						F2FS_LOG_SECTORS_PER_BLOCK;
	if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
		rz_args->zone_cap_mismatch = true;

	return 0;
}
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	struct f2fs_report_zones_args rep_zone_arg;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	/* Get block zones type and zone-capacity */
	FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
					FDEV(devi).nr_blkz * sizeof(block_t),
					GFP_KERNEL);
	if (!FDEV(devi).zone_capacity_blocks)
		return -ENOMEM;

	rep_zone_arg.dev = &FDEV(devi);
	rep_zone_arg.zone_cap_mismatch = false;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;

	if (!rep_zone_arg.zone_cap_mismatch) {
		kfree(FDEV(devi).zone_capacity_blocks);
		FDEV(devi).zone_capacity_blocks = NULL;
	}

	return 0;
}
#endif
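/*
 * zone_capacity_blocks is only kept when at least one sequential zone
 * reports a usable capacity smaller than its size (zone->capacity <
 * zone->len, as on some ZNS-style devices); when every zone is fully
 * usable the array is freed so the rest of the code can presumably treat
 * all zones uniformly.
 */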
/*
 * Read f2fs raw super block.
 * Because we keep two copies of the super block, read both of them
 * to get the first valid one. If either copy is broken, a recovery flag
 * is passed back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			*recovery = 1;
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
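/*
 * The two superblock copies live in the first two filesystem blocks of the
 * device, each holding the structure at F2FS_SUPER_OFFSET. When only one
 * copy passes the sanity checks, *recovery is set so that f2fs_fill_super()
 * can later rewrite the broken copy via f2fs_commit_super(sbi, true).
 */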
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
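/*
 * The backup copy is always written before the currently valid one, so a
 * crash between the two writes still leaves one superblock on disk that was
 * valid at some point; in the recovery path only the broken copy is
 * rewritten and the valid one is left untouched.
 */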
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
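/*
 * For a multi-device volume the per-device start_blk/end_blk ranges computed
 * above stitch the devices into one contiguous block address space: device 0
 * starts at segment0_blkaddr and every following device begins right after
 * the previous device's end_blk.
 */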
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		if (f2fs_sb_has_encrypt(sbi)) {
			f2fs_err(sbi,
				 "Can't mount with encoding and encryption");
			return -EINVAL;
		}

		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
					  &encoding_flags)) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			 "%s-%s with flags 0x%hx", encoding_info->name,
			 encoding_info->version?:"\b", encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
		sbi->sb->s_d_op = &f2fs_dentry_ops;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}

	sbi->readdir_ra = 1;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_err(sbi, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						 sizeof(raw_super->uuid));

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options, false);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	init_rwsem(&sbi->gc_lock);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				     array_size(n,
						sizeof(struct f2fs_bio_info)),
				     GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_rwsem(&sbi->write_io[i][j].bio_list_lock);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_ALIGNED(sbi)) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_io_dummy;
	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part,
					    sectors[STAT_WRITE]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * the mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = -EROFS;
				f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
				goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}

	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_check_write_pointer(sbi);
		if (err)
			goto free_meta;
	}

reset_checkpoint:
	f2fs_init_inmem_curseg(sbi);

	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If the filesystem is not mounted read-only,
	 * start the gc_thread.
	 */
	if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be left behind when
	 * f2fs_recover_orphan_inodes() fails with EIO. iput(node_inode) can
	 * then trigger balance_fs_bg() followed by f2fs_write_checkpoint()
	 * through f2fs_write_node_pages(), which falls into an infinite loop
	 * in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
	f2fs_destroy_post_read_wq(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	kvfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);
	sb->s_fs_info = NULL;

	/* give only one more chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
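/*
 * The retry at the end gives the mount exactly one more attempt: when
 * roll-forward recovery failed (skip_recovery was set), everything allocated
 * so far is torn down, the dentry cache for this sb is shrunk, and control
 * jumps back to try_onemore with recovery skipped, so the filesystem can
 * still be mounted in its last checkpointed state.
 */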
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_post_read;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	return 0;
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
)
4111 module_exit(exit_f2fs_fs
)
4113 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
4114 MODULE_DESCRIPTION("Flash Friendly File System");
4115 MODULE_LICENSE("GPL");