// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	atomic_set(&ffi->inject_ops, 0);
	ffi->inject_rate = rate;

	ffi->inject_type = type;

	memset(ffi, 0, sizeof(struct f2fs_fault_info));

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
	Opt_inline_xattr_size,
	Opt_test_dummy_encryption,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_checkpoint, "checkpoint=%s"},
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
	struct va_format vaf;

	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);

static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
	block_t limit = (sbi->user_block_count << 1) / 1000;

	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_msg(sbi->sb, KERN_INFO,
			"Reduce reserved blocks for root = %u",
			F2FS_OPTION(sbi).root_reserved_blocks);
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_msg(sbi->sb, KERN_INFO,
			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));

static void init_once(void *foo)
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);

static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");

	qname = match_strdup(args);
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
	kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			"Cannot enable project quota enforcement.");
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
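
/*
 * parse_options() walks the comma-separated mount option string with
 * match_token() and turns each recognized token into mount flags and
 * F2FS_OPTION(sbi) fields; cross-option consistency checks follow the loop.
 */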
static int parse_options(struct super_block *sb, char *options)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, FORCE_FG_GC);
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
			set_opt(sbi, DISCARD);
			if (f2fs_sb_has_blkzoned(sbi)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
			clear_opt(sbi, DISCARD);
			set_opt(sbi, NOHEAP);
			clear_opt(sbi, NOHEAP);
#ifdef CONFIG_F2FS_FS_XATTR
			set_opt(sbi, XATTR_USER);
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
#ifdef CONFIG_F2FS_FS_POSIX_ACL
			set_opt(sbi, POSIX_ACL);
			clear_opt(sbi, POSIX_ACL);
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
			F2FS_OPTION(sbi).active_logs = arg;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			set_opt(sbi, NOBARRIER);
			set_opt(sbi, FASTBOOT);
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			set_opt(sbi, DATA_FLUSH);
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_msg(sb, KERN_INFO,
					"Preserve previous reserve_root=%u",
					F2FS_OPTION(sbi).root_reserved_blocks);
			F2FS_OPTION(sbi).root_reserved_blocks = arg;
			set_opt(sbi, RESERVE_ROOT);
			if (args->from && match_int(args, &arg))
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid uid value %d", arg);
			F2FS_OPTION(sbi).s_resuid = uid;
			if (args->from && match_int(args, &arg))
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid gid value %d", arg);
			F2FS_OPTION(sbi).s_resgid = gid;
			name = match_strdup(&args[0]);
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
			F2FS_OPTION(sbi).write_io_size_bits = arg;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			if (args->from && match_int(args, &arg))
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
		case Opt_fault_injection:
			f2fs_msg(sb, KERN_INFO,
				"fault_injection options not supported");
			f2fs_msg(sb, KERN_INFO,
				"fault_type options not supported");
			sb->s_flags |= SB_LAZYTIME;
			sb->s_flags &= ~SB_LAZYTIME;
			set_opt(sbi, USRQUOTA);
			set_opt(sbi, GRPQUOTA);
			set_opt(sbi, PRJQUOTA);
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
			f2fs_msg(sb, KERN_INFO,
				"quota operations not supported");
			name = match_strdup(&args[0]);
			if (strlen(name) == 10 &&
					!strncmp(name, "user-based", 10)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (strlen(name) == 3 &&
					!strncmp(name, "off", 3)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (strlen(name) == 8 &&
					!strncmp(name, "fs-based", 8)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			name = match_strdup(&args[0]);
			if (strlen(name) == 7 &&
					!strncmp(name, "default", 7)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (strlen(name) == 5 &&
					!strncmp(name, "reuse", 5)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			name = match_strdup(&args[0]);
			if (strlen(name) == 5 &&
					!strncmp(name, "posix", 5)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (strlen(name) == 6 &&
					!strncmp(name, "strict", 6)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (strlen(name) == 9 &&
					!strncmp(name, "nobarrier", 9)) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sbi)) {
				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_msg(sb, KERN_INFO,
				"Test dummy encryption mode enabled");
			f2fs_msg(sb, KERN_INFO,
				"Test dummy encryption mount option ignored");
			name = match_strdup(&args[0]);
			if (strlen(name) == 6 &&
					!strncmp(name, "enable", 6)) {
				clear_opt(sbi, DISABLE_CHECKPOINT);
			} else if (strlen(name) == 7 &&
					!strncmp(name, "disable", 7)) {
				set_opt(sbi, DISABLE_CHECKPOINT);
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",

	if (f2fs_check_quota_options(sbi))
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Filesystem with quota feature cannot be mounted RDWR "
			"without CONFIG_QUOTA");
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_msg(sb, KERN_ERR,
			"Filesystem with project quota feature cannot be "
			"mounted RDWR without CONFIG_QUOTA");

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
			"Should set mode=lfs with %uKB-sized IO",
			F2FS_IO_SIZE_KB(sbi));

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_msg(sb, KERN_ERR,
				"extra_attr or flexible_inline_xattr "
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
				"inline_xattr_size option should be "
				"set with inline_xattr option");

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_msg(sb, KERN_ERR,
				"inline xattr size is out of range: %d ~ %d",

	if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
			"LFS not compatible with checkpoint=disable\n");

	/* Not pass down write hints if the number of active logs is lesser
	 * than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
static struct inode *f2fs_alloc_inode(struct super_block *sb)
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
static int f2fs_drop_inode(struct inode *inode)
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remained atomic pages should discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		trace_f2fs_drop_inode(inode, 0);
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
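
/*
 * Dirty-inode bookkeeping: f2fs_inode_dirtied() and f2fs_inode_synced()
 * toggle FI_DIRTY_INODE under inode_lock[DIRTY_META] and keep the per-sb
 * dirty list and the F2FS_DIRTY_IMETA page count consistent.
 */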
int f2fs_inode_dirtied(struct inode *inode, bool sync)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
	set_inode_flag(inode, FI_DIRTY_INODE);
	stat_inc_dirty_inode(sbi, DIRTY_META);

	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

void f2fs_inode_synced(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))

	if (flags == I_DIRTY_TIME)

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);

static void f2fs_free_inode(struct inode *inode)
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);

static void destroy_device_list(struct f2fs_sb_info *sbi)
	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
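
/*
 * f2fs_put_super() quiesces the filesystem at unmount: quotas are turned
 * off, a final checkpoint is written when needed, discard and writeback
 * are drained, and the node/segment managers and per-sb state are freed.
 */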
static void f2fs_put_super(struct super_block *sb)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		f2fs_write_checkpoint(sbi, &cpc);

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		f2fs_write_checkpoint(sbi, &cpc);

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages_writeback(sbi);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kvfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
int f2fs_sync_fs(struct super_block *sb, int sync)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (unlikely(f2fs_cp_error(sbi)))
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))

		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = f2fs_write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	f2fs_trace_ios(NULL, 1);

static int f2fs_freeze(struct super_block *sb)
	if (f2fs_readonly(sb))

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))

static int f2fs_unfreeze(struct super_block *sb)
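
/*
 * f2fs_statfs_project() clamps the block and inode counts reported by
 * statfs to the project quota soft limit (falling back to the hard limit
 * when no soft limit is set) for the given project ID.
 */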
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
	struct dquot *dquot;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;

	spin_unlock(&dquot->dq_dqb_lock);
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;

	avail_node_count = sbi->total_node_count - sbi->nquota_files -
						F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
static inline void f2fs_show_quota_options(struct seq_file *seq,
							struct super_block *sb)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		seq_printf(seq, ",jqfmt=%s", fmtname);

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
			seq_printf(seq, ",background_gc=%s", "on");
		seq_printf(seq, ",background_gc=%s", "off");
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
		seq_puts(seq, ",noacl");
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_FS_ENCRYPTION
	if (F2FS_OPTION(sbi).test_dummy_encryption)
		seq_puts(seq, ",test_dummy_encryption");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_puts(seq, ",checkpoint=disable");

	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
static void default_options(struct f2fs_sb_info *sbi)
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).test_dummy_encryption = false;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi))
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);

	f2fs_build_fault_attr(sbi, 0, 0);
static int f2fs_enable_quotas(struct super_block *sb);

static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"checkpoint=disable on readonly fs");
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		mutex_lock(&sbi->gc_mutex);
		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
		if (err == -ENODATA) {
		if (err && err != -EAGAIN)

	ret = sync_filesystem(sbi->sb);
		err = ret ? ret : err;

	if (f2fs_disable_cp_again(sbi)) {

	mutex_lock(&sbi->gc_mutex);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = 0;
	spin_unlock(&sbi->stat_lock);

	mutex_unlock(&sbi->gc_mutex);
	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */

static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
	mutex_lock(&sbi->gc_mutex);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	mutex_unlock(&sbi->gc_mutex);

	f2fs_sync_fs(sbi->sb, 1);
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
	bool checkpoint_changed;

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kvfree(org_mount_opt.s_qf_names[j]);
			org_mount_opt.s_qf_names[i] = NULL;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);

	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))

	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"disabling checkpoint not compatible with read-only");

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		need_stop_gc = true;

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);

	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			f2fs_enable_checkpoint(sbi);

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
		err = f2fs_create_flush_cmd_control(sbi);

	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(org_mount_opt.s_qf_names[i]);

	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);

	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	loff_t i_size = i_size_read(inode);

	if (off + len > i_size)

	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);

		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,

		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
static struct dquot **f2fs_get_dquots(struct inode *inode)
	return F2FS_I(inode)->i_dquot;

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
	return &F2FS_I(inode)->i_reserved_quota;

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"quota sysfile may be corrupted, skip loading it");

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);

int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
			f2fs_msg(sbi->sb, KERN_ERR,
				"Cannot turn on quota_ino: %d", err);

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			f2fs_msg(sbi->sb, KERN_ERR,
				"Cannot turn on quotas: %d on %d", err, i);
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
	struct inode *qf_inode;
	unsigned long qf_inum;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_msg(sb, KERN_ERR,
			"Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);

static int f2fs_enable_quotas(struct super_block *sb)
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(F2FS_SB(sb), USRQUOTA),
		test_opt(F2FS_SB(sb), GRPQUOTA),
		test_opt(F2FS_SB(sb), PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_msg(sb, KERN_ERR,
			"quota file may be corrupted, skip loading it");

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
				f2fs_msg(sb, KERN_ERR,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
int f2fs_quota_sync(struct super_block *sb, int type)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);

	ret = dquot_writeback_dquots(sb, type);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct address_space *mapping;

		if (type != -1 && cnt != type)
		if (!sb_has_quota_active(sb, cnt))

		mapping = dqopt->files[cnt]->i_mapping;

		ret = filemap_fdatawrite(mapping);

		/* if we are using journalled quota */
		if (is_journalled_quota(sbi))

		ret = filemap_fdatawait(mapping);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
					const struct path *path)
	struct inode *inode;

	err = f2fs_quota_sync(sb, type);

	err = dquot_quota_on(sb, type, format_id, path);

	inode = d_inode(path->dentry);

	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

static int f2fs_quota_off(struct super_block *sb, int type)
	struct inode *inode = sb_dqopt(sb)->files[type];

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))

	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

void f2fs_quota_off_umount(struct super_block *sb)
	for (type = 0; type < MAXQUOTAS; type++) {
		err = f2fs_quota_off(sb, type);
			int ret = dquot_quota_off(sb, type);

			f2fs_msg(sb, KERN_ERR,
				"Fail to turn off disk quota "
				"(type: %d, err: %d, ret:%d), Please "
				"run fsck to fix it.", type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
	struct quota_info *dqopt = sb_dqopt(sb);

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
		f2fs_inode_synced(dqopt->files[type]);

static int f2fs_dquot_commit(struct dquot *dquot)
	ret = dquot_commit(dquot);
		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);

static int f2fs_dquot_acquire(struct dquot *dquot)
	ret = dquot_acquire(dquot);
		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);

static int f2fs_dquot_release(struct dquot *dquot)
	ret = dquot_release(dquot);
		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
	ret = dquot_commit_info(sb, type);
		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);

static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
	*projid = F2FS_I(inode)->i_projid;
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= f2fs_dquot_commit,
	.acquire_dquot	= f2fs_dquot_acquire,
	.release_dquot	= f2fs_dquot_release,
	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
	.write_info	= f2fs_dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};

int f2fs_quota_sync(struct super_block *sb, int type)

void f2fs_quota_off_umount(struct super_block *sb)
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);

static bool f2fs_dummy_context(struct inode *inode)
	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.dummy_context	= f2fs_dummy_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= F2FS_NAME_LEN,
};
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		return ERR_PTR(-ESTALE);

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
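
/*
 * max_file_blocks() sums the blocks addressable from one inode: the
 * inode's own address slots plus two direct node blocks, two indirect
 * node blocks, and one double-indirect node block.
 */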
static loff_t max_file_blocks(void)
	loff_t leaf_count = DEF_ADDRS_PER_BLOCK;

	/*
	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
	 * space in inode.i_addr, it will be more safe to reassign
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;
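
/*
 * __f2fs_commit_super() copies the in-memory superblock into the buffer
 * head and issues a synchronous preflush + FUA write so the update reaches
 * stable storage immediately.
 */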
static int __f2fs_commit_super(struct buffer_head *bh,
					struct f2fs_super_block *super)
	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
	} else if (main_end_blkaddr < seg_end_blkaddr) {

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;
	size_t crc_offset = 0;
	__u32 crc = 0;

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_msg(sb, KERN_INFO,
				"Invalid SB checksum offset: %zu",
				crc_offset);
			return 1;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_msg(sb, KERN_INFO,
				"Invalid SB checksum value: %u", crc);
			return 1;
		}
	}

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			segment_count);
		return 1;
	}

	if (total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment/section count (%u, %u x %u)",
			segment_count, total_sections, segs_per_sec);
		return 1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_msg(sb, KERN_INFO,
			"Small segment_count (%u < %u * %u)",
			segment_count, segs_per_sec, total_sections);
		return 1;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong segment_count / block_count (%u > %llu)",
			segment_count, le64_to_cpu(raw_super->block_count));
		return 1;
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong secs_per_zone / total_sections (%u, %u)",
			secs_per_zone, total_sections);
		return 1;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_msg(sb, KERN_INFO,
			"Corrupted extension count (%u + %u > %u)",
			le32_to_cpu(raw_super->extension_count),
			raw_super->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return 1;
	}

	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_msg(sb, KERN_INFO,
			"Insane cp_payload (%u > %u)",
			le32_to_cpu(raw_super->cp_payload),
			blocks_per_seg - F2FS_CP_PACKS);
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
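
/*
 * Cross-check the loaded checkpoint against the superblock: meta segment
 * counts, block/node usage, current segment numbers and bitmap sizes.
 * Returns 1 when an inconsistency is found, so the mount can be refused.
 */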
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong user_block_count: %u", user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong valid_user_blocks: %u, user_block_count: %u",
			valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - sbi->nquota_files -
						F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong valid_node_count: %u, avail_node_count: %u",
			valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Node segment (%u, %u) has the same "
					"segno: %u", i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Data segment (%u, %u) has the same "
					"segno: %u", i, j,
					le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Data segment (%u) and Data segment (%u)"
					" has the same segno: %u", i, j,
					le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong bitmap size: sit: %u, nat:%u",
			sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong cp_pack_start_sum: %u",
			cp_pack_start_sum);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
}
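
/* Set up the per-CPU counters used for block and inode accounting. */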
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		percpu_counter_destroy(&sbi->alloc_valid_block_count);

	return err;
}
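
/*
 * For zoned block devices, record the zone geometry of one device and
 * build a bitmap of its sequential-write-required zones.
 */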
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES   4096

	zones = f2fs_kzalloc(sbi,
			     array_size(F2FS_REPORT_NR_ZONES,
					sizeof(struct blk_zone)),
			     GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(n, FDEV(devi).blkz_seq);
			sector += zones[i].len;
			n++;
		}
	}

	kvfree(zones);

	return err;
}
#endif
/*
 * Read the f2fs raw super block.
 * Because we keep two copies of the super block, read both of them
 * to get the first valid one. If either copy is broken, a recovery
 * flag is passed back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks*/
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kvfree(super);
	else
		err = 0;

	return err;
}
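
/*
 * Write the in-memory superblock back to disk. The backup copy is always
 * written first; the currently valid copy is skipped on the recovery path.
 */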
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_msg(sbi->sb, KERN_INFO,
			"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
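
/* Adjust allocation and discard policies for small volumes. */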
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}

	sbi->readdir_ra = 1;
}
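
/*
 * Fill the VFS super block at mount time: read and check the superblock
 * and checkpoint, set up the internal managers, read the meta/node/root
 * inodes, optionally recover fsynced data, and start background GC.
 */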
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_msg(sb, KERN_ERR,
			 "Zoned block device support is not enabled");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	if (f2fs_sb_has_quota_ino(sbi))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1: NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				     array_size(n,
						sizeof(struct f2fs_bio_info)),
				     GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part,
					    sectors[STAT_WRITE]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", err);
	}
#endif
	/* if there are any orphan nodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = -EROFS;
				f2fs_msg(sb, KERN_ERR,
					"Need to recover fsync data, but "
					"write access unavailable");
				goto free_meta;
			}
			f2fs_msg(sbi->sb, KERN_INFO, "write access "
				"unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_meta;
		}
	}
reset_checkpoint:
	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If the filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	kvfree(options);
free_sb_buf:
	kvfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kvfree(sbi);

	/* give only one another chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
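
/*
 * Tear down the superblock on umount; write a final checkpoint if the
 * filesystem is dirty or the last checkpoint was not a clean umount.
 */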
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
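
/* Slab cache for in-memory f2fs inode objects. */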
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
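
/*
 * Module init: register caches, sysfs entries, the shrinker and the
 * filesystem type, unwinding in reverse order on failure.
 */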
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	return 0;

free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_extent_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");