// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};

void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
						unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
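
/*
 * Usage sketch (illustrative, not from the original source): with
 * CONFIG_F2FS_FAULT_INJECTION enabled, the fault_injection=%u (rate) and
 * fault_type=%u (type) mount options handled in parse_options() below feed
 * this table through f2fs_build_fault_attr().
 */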
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_err, NULL},
};
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}
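
/*
 * Note (inferred from the code above): callers embed a KERN_<LEVEL> prefix
 * in fmt; printk_get_level()/printk_skip_level() peel it off so the
 * "F2FS-fs (%s)" banner can be re-inserted after the level byte.
 */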
#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}
#endif
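
/*
 * Worked example (illustrative): (user_block_count << 1) / 1000 below caps
 * the root reservation at 0.2% of user blocks, so a filesystem with
 * 1,000,000 user blocks allows at most 2,000 reserved blocks for root.
 */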
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count << 1) / 1000,
			sbi->user_block_count - sbi->reserved_blocks);

	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}
static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kvfree(qname);
	return ret;
}
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif
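
/*
 * parse_options() walks the comma-separated mount option string with
 * strsep(), matches each token against f2fs_tokens, and fails the mount
 * with -EINVAL on an unrecognized option or invalid argument.
 */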
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
					kvfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_warn(sbi, "Not support %d, larger than %d",
					  1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;

		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;

		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 10 &&
					!strncmp(name, "user-based", 10)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (strlen(name) == 3 &&
					!strncmp(name, "off", 3)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (strlen(name) == 8 &&
					!strncmp(name, "fs-based", 8)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 7 &&
					!strncmp(name, "default", 7)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (strlen(name) == 5 &&
					!strncmp(name, "reuse", 5)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 5 &&
					!strncmp(name, "posix", 5)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (strlen(name) == 6 &&
					!strncmp(name, "strict", 6)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (strlen(name) == 9 &&
					!strncmp(name, "nobarrier", 9)) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kvfree(name);
				return -EINVAL;
			}
			kvfree(name);
			break;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sbi)) {
				f2fs_err(sbi, "Encrypt feature is off");
				return -EINVAL;
			}

			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_info(sbi, "Test dummy encryption mode enabled");
#else
			f2fs_info(sbi, "Test dummy encryption mount option ignored");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#ifndef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
			F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
		return -EINVAL;
	}

	/*
	 * Do not pass down write hints if the number of active logs is less
	 * than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}
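
/*
 * Inode lifecycle helpers: f2fs_alloc_inode()/f2fs_free_inode() manage the
 * per-inode f2fs_inode_info slab object, while f2fs_drop_inode() and the
 * dirtied/synced helpers below track per-inode dirty state for checkpointing.
 */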
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
#endif
	}
	kvfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip doing the checkpoint, so we need this
	 * as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kvfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
	utf8_unload(sbi->s_encoding);
#endif
	kvfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = f2fs_write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	else
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_FS_ENCRYPTION
	if (F2FS_OPTION(sbi).test_dummy_encryption)
		seq_puts(seq, ",test_dummy_encryption");
#endif
	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
	return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).test_dummy_encryption = false;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi))
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
	else
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}
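
/*
 * Illustrative summary (derived from default_options() above): a plain
 * "mount -t f2fs" behaves roughly like passing background_gc=on,
 * inline_xattr,inline_data,inline_dentry,extent_cache,no_heap,flush_merge,
 * discard,lazytime, with mode=lfs on zoned devices and mode=adaptive
 * otherwise.
 */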
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		mutex_lock(&sbi->gc_mutex);
		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

	mutex_lock(&sbi->gc_mutex);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	mutex_unlock(&sbi->gc_mutex);
restore_flag:
	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */
	return err;
}
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	mutex_lock(&sbi->gc_mutex);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	mutex_unlock(&sbi->gc_mutex);

	f2fs_sync_fs(sbi->sb, 1);
}
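
/*
 * f2fs_remount() below first snapshots the current mount options
 * (including journaled quota file names) so that any failure after option
 * parsing can roll the superblock back to its previous state via the
 * restore paths at the end of the function.
 */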
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
	bool checkpoint_changed;
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kvfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;
	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}

	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_gc;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

	/*
	 * We stop issuing the flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
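
/*
 * f2fs_quota_write() below mirrors the read path above: it copies through
 * the page cache using the address_space write_begin()/write_end() hooks
 * instead of read_cache_page_gfp().
 */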
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	/*
	 * do_quotactl
	 *  f2fs_quota_sync
	 *   down_read(quota_sem)
	 *   dquot_writeback_dquots()
	 *    f2fs_dquot_commit
	 *     block_operation
	 *     down_read(quota_sem)
	 */
	f2fs_lock_op(sbi);

	down_read(&sbi->quota_sem);
	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		goto out;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct address_space *mapping;

		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		mapping = dqopt->files[cnt]->i_mapping;

		ret = filemap_fdatawrite(mapping);
		if (ret)
			goto out;

		/* if we are using journalled quota */
		if (is_journalled_quota(sbi))
			continue;

		ret = filemap_fdatawait(mapping);
		if (ret)
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
out:
	if (ret)
		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	f2fs_unlock_op(sbi);
	return ret;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	err = __f2fs_quota_off(sb, type);

	/*
	 * quotactl can shut down journalled quota, resulting in inconsistency
	 * between quota records and fs data from subsequent updates; tag the
	 * flag to let fsck be aware of it.
	 */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
}
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}
static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	down_read(&sbi->quota_sem);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	return ret;
}
static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	return ret;
}
static int f2fs_dquot_release(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	down_read(&sbi->quota_sem);
	ret = dquot_release(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	return ret;
}
static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret;

	down_read(&sbi->quota_sem);
	ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

	up_read(&sbi->quota_sem);
	return ret;
}
static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret;

	down_read(&sbi->quota_sem);
	ret = dquot_commit_info(sb, type);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	up_read(&sbi->quota_sem);
	return ret;
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= f2fs_dquot_commit,
	.acquire_dquot	= f2fs_dquot_acquire,
	.release_dquot	= f2fs_dquot_release,
	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
	.write_info	= f2fs_dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static bool f2fs_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.dummy_context	= f2fs_dummy_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= F2FS_NAME_LEN,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = DEF_ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but since f2fs now tries to reserve
	 * more space in inode.i_addr, it is safer to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
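
/*
 * Worked example (illustrative, assuming 4KiB blocks and
 * DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): the sum is
 * 2*1018 + 2*1018^2 + 1018^3, about 1.06e9 blocks, i.e. a per-file size
 * limit on the order of 4TB.
 */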
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			  main_blkaddr,
			  segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)",
			  res, main_blkaddr,
			  segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	unsigned int blocksize;
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
			  PAGE_SIZE);
		return -EFSCORRUPTED;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
			  blocksize);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_info(sbi, "Insane cp_payload (%u > %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return -EFSCORRUPTED;

	return 0;
}

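/*
 * Checkpoint-level sanity checks, run after the superblock itself has
 * been validated: metadata segment counts, block/node quotas, current
 * segment numbers and the SIT/NAT version bitmap sizes must all agree
 * with the superblock before the checkpoint is trusted.
 */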
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		percpu_counter_destroy(&sbi->alloc_valid_block_count);

	return err;
}

#ifdef CONFIG_BLK_DEV_ZONED

static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES   4096

	zones = f2fs_kzalloc(sbi,
			     array_size(F2FS_REPORT_NR_ZONES,
					sizeof(struct blk_zone)),
			     GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector, zones, &nr_zones);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(n, FDEV(devi).blkz_seq);
			sector += zones[i].len;
			n++;
		}
	}

	kvfree(zones);

	return err;
}
#endif

/*
 * Read the f2fs raw super block.
 * Because we keep two copies of the super block, read both of them
 * to get the first valid one. If either is broken, we pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}

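/*
 * Commit the in-memory superblock to disk. The backup copy is written
 * first so that a crash mid-update always leaves at least one valid
 * superblock on disk; only after the backup write returns is the
 * currently valid copy overwritten.
 */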
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}

static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}

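/*
 * Casefolding support: if the superblock requests an encoding, load the
 * matching utf8 table and wire up the casefolding dentry operations.
 * An encoding is refused on encrypted filesystems here, and a casefold
 * filesystem cannot be mounted at all without CONFIG_UNICODE.
 */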
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi) && !sbi->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		if (f2fs_sb_has_encrypt(sbi)) {
			f2fs_err(sbi,
				"Can't mount with encoding and encryption");
			return -EINVAL;
		}

		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
					  &encoding_flags)) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			 "%s-%s with flags 0x%hx", encoding_info->name,
			 encoding_info->version?:"\b", encoding_flags);

		sbi->s_encoding = encoding;
		sbi->s_encoding_flags = encoding_flags;
		sbi->sb->s_d_op = &f2fs_dentry_ops;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}

static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}

	sbi->readdir_ra = 1;
}

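/*
 * Mount-time entry point. Roughly: read and validate the superblock,
 * parse options, set up the VFS hooks, load the checkpoint, scan the
 * devices, bring up the segment/node managers, read the node and root
 * inodes, then replay fsynced data before allowing writes. On failure
 * the labels at the bottom unwind in reverse order of construction.
 */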
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_err(sbi, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_err(sbi, "Zoned block device support is not enabled");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	mutex_init(&sbi->resize_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1: NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				     array_size(n,
						sizeof(struct f2fs_bio_info)),
				     GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_ALIGNED(sbi)) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part,
					    sectors[STAT_WRITE]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * the mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = -EROFS;
				f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
				goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}
reset_checkpoint:
	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If the filesystem is not mounted read-only,
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#ifdef CONFIG_UNICODE
	utf8_unload(sbi->s_encoding);
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	kvfree(options);
free_sb_buf:
	kvfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kvfree(sbi);

	/* give only one more chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	return 0;

free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_extent_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");