/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
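/*
 * Illustrative note (not part of the original source): inject_rate is an
 * approximate "fail 1 in N operations" frequency and inject_type is a bitmask
 * of the FAULT_* sites named above.  Call sites elsewhere in f2fs (assumed to
 * go through a time_to_inject()-style helper in f2fs.h) bump inject_ops and
 * force the corresponding allocation or I/O path to fail once the counter
 * reaches inject_rate, which is how fault_injection=%u exercises error paths.
 */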
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
	Opt_inline_xattr_size,
	Opt_test_dummy_encryption,
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_err, NULL},
};
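/*
 * Illustrative note (not part of the original source): these strings are what
 * users pass through mount(8), e.g.
 *
 *	mount -t f2fs -o background_gc=on,discard,inline_xattr,active_logs=6 \
 *		/dev/sdb1 /mnt/f2fs
 *
 * Each token is matched by match_token() in parse_options() below; "%u"/"%s"
 * arguments are pulled out with match_int()/match_strdup().
 */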
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = (sbi->user_block_count << 1) / 1000;

	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_msg(sbi->sb, KERN_INFO,
			"Reduce reserved blocks for root = %u",
			F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_msg(sbi->sb, KERN_INFO,
			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
}
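/*
 * Illustrative note (not part of the original source): (user_block_count << 1)
 * / 1000 is 2/1000 of the user-visible block count, i.e. the reserve_root
 * mount option is capped at roughly 0.2% of the filesystem.  For a 100 GiB
 * volume with 4 KiB blocks that is about 52,000 blocks (~200 MiB).
 */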
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sb)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -EINVAL;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			goto errout;
		f2fs_msg(sb, KERN_ERR,
			"%s quota file already specified",
			QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	return 0;
errout:
	kfree(qname);
	return -EINVAL;
}
static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			"Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Filesystem with quota feature cannot be mounted RDWR "
			"without CONFIG_QUOTA");
	}
	return 0;
}
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0, ret, token;
	kuid_t uid;
	kgid_t gid;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_has_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_msg(sb, KERN_INFO,
					"Preserve previous reserve_root=%u",
					F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_msg(sb, KERN_ERR,
					"Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_has_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 10 &&
					!strncmp(name, "user-based", 10)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (strlen(name) == 3 &&
					!strncmp(name, "off", 3)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (strlen(name) == 8 &&
					!strncmp(name, "fs-based", 8)) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			}
			kfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 7 &&
					!strncmp(name, "default", 7)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (strlen(name) == 5 &&
					!strncmp(name, "reuse", 5)) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 5 &&
					!strncmp(name, "posix", 5)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (strlen(name) == 6 &&
					!strncmp(name, "strict", 6)) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (strlen(name) == 9 &&
					!strncmp(name, "nobarrier", 9)) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
			if (!f2fs_sb_has_encrypt(sb)) {
				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).test_dummy_encryption = true;
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mode enabled");
#else
			f2fs_msg(sb, KERN_INFO,
					"Test dummy encryption mount option ignored");
#endif
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		if (!f2fs_sb_has_extra_attr(sb) ||
			!f2fs_sb_has_flexible_inline_xattr(sb)) {
			f2fs_msg(sb, KERN_ERR,
					"extra_attr or flexible_inline_xattr "
					"feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
					"inline_xattr_size option should be "
					"set with inline_xattr option");
			return -EINVAL;
		}
		if (!F2FS_OPTION(sbi).inline_xattr_size ||
			F2FS_OPTION(sbi).inline_xattr_size >=
					DEF_ADDRS_PER_INODE -
					F2FS_TOTAL_EXTRA_ATTR_SIZE -
					DEF_INLINE_RESERVED_SIZE -
					DEF_MIN_INLINE_SIZE) {
			f2fs_msg(sb, KERN_ERR,
					"inline xattr size is out of range");
			return -EINVAL;
		}
	}

	/*
	 * Do not pass down write hints if the number of active logs is less
	 * than NR_CURSEG_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
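/*
 * Illustrative note (not part of the original source): returning 0 from
 * ->drop_inode asks the VFS to keep the inode cached, while
 * generic_drop_inode() returns nonzero for unhashed or link-count-zero inodes
 * so they get evicted.  The early-return path above therefore keeps an inode
 * that is still under writeback (I_SYNC) alive instead of letting GC recurse
 * into evict() and deadlock.
 */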
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* f2fs_write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = f2fs_write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
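/*
 * Illustrative note (not part of the original source): statfs(2) output for a
 * project-quota directory is clamped to the quota limits above; the soft
 * limit is preferred and the hard limit is only used when no soft limit is
 * set, so "df" inside such a directory reports the quota, not the whole
 * device.
 */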
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;
	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - sbi->nquota_files -
						F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
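/*
 * Illustrative note (not part of the original source): f_bfree counts all
 * unallocated user blocks, while f_bavail additionally subtracts the
 * reserve_root pool, so unprivileged "df" output can show less available
 * space than free space once reserve_root= is in effect.
 */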
static inline void f2fs_show_quota_options(struct seq_file *seq,
							struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	if (F2FS_OPTION(sbi).test_dummy_encryption)
		seq_puts(seq, ",test_dummy_encryption");
#endif
	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).test_dummy_encryption = false;
	sbi->readdir_ra = 1;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_has_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}
static int f2fs_enable_quotas(struct super_block *sb);
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, i, j;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sb)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue_flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}
static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_msg(sbi->sb, KERN_ERR,
					"Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_msg(sbi->sb, KERN_ERR,
				"Cannot turn on quotas: %d on %d", err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(sb));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_msg(sb, KERN_ERR,
			"Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(F2FS_SB(sb), USRQUOTA),
		test_opt(F2FS_SB(sb), GRPQUOTA),
		test_opt(F2FS_SB(sb), PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_msg(sb, KERN_ERR,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}
static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
					S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, type);

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(sb))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};
static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi->sb) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static bool f2fs_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.dummy_context	= f2fs_dummy_context,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= F2FS_NAME_LEN,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
	 * space in inode.i_addr, it will be more safe to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
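/*
 * Illustrative note (not part of the original source): with the usual
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018, the sum above is
 * 2*1018 + 2*1018^2 + 1018^3, roughly 1.06e9 blocks, i.e. about 3.9 TiB of
 * addressable file data with 4 KiB blocks (the in-inode direct pointers are
 * deliberately not counted any more, per the comment above).
 */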
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
				segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			segment_count);
		return 1;
	}

	if (total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment/section count (%u, %u x %u)",
			segment_count, total_sections, segs_per_sec);
		return 1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_msg(sb, KERN_INFO,
			"Small segment_count (%u < %u * %u)",
			segment_count, segs_per_sec, total_sections);
		return 1;
	}

	if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong segment_count / block_count (%u > %u)",
			segment_count, le32_to_cpu(raw_super->block_count));
		return 1;
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong secs_per_zone / total_sections (%u, %u)",
			secs_per_zone, total_sections);
		return 1;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_msg(sb, KERN_INFO,
			"Corrupted extension count (%u + %u > %u)",
			le32_to_cpu(raw_super->extension_count),
			raw_super->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return 1;
	}

	if (le32_to_cpu(raw_super->cp_payload) >
				(blocks_per_seg - F2FS_CP_PACKS)) {
		f2fs_msg(sb, KERN_INFO,
			"Insane cp_payload (%u > %u)",
			le32_to_cpu(raw_super->cp_payload),
			blocks_per_seg - F2FS_CP_PACKS);
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	int i;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong bitmap size: sit: %u, nat:%u",
			sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i, j;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
}
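/*
 * alloc_valid_block_count and total_valid_inode_count are updated on hot
 * allocation paths, so they are kept as per-cpu counters here, presumably
 * to avoid the cross-CPU contention that plain atomics would incur.
 */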
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_has_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
								GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES	4096

	zones = f2fs_kzalloc(sbi,
			array_size(F2FS_REPORT_NR_ZONES,
					sizeof(struct blk_zone)),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					zones, &nr_zones,
					GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif

/*
 * Read the f2fs raw super block.
 * There are two copies of the super block, so read both of them
 * and use the first valid one. If either copy is broken, pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
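/*
 * Write the in-core superblock back to disk. The backup copy is written
 * first; only when that succeeds, and we are not on the recovery path,
 * is the currently valid copy rewritten, so one on-disk copy should stay
 * consistent at every point in time.
 */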
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
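/*
 * Build the device list for single-device, multi-device and zoned
 * volumes. Each entry covers a contiguous block range: a device with
 * total_segments segments spans (total_segments << log_blocks_per_seg)
 * blocks, the first device starts at segment0_blkaddr, and every later
 * device starts right after the previous device's end_blk.
 */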
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
			array_size(max_devices,
					sizeof(struct f2fs_dev_info)),
			GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1 +
					le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi->sb)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_msg(sbi->sb, KERN_INFO,
			"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}
}
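/*
 * Fill the VFS super block at mount time: read and sanity-check the raw
 * superblock, parse mount options, set up the meta/node inodes and the
 * segment, node and GC managers, replay fsynced data if needed, and
 * finally start the background GC thread. The labels at the bottom
 * unwind these steps in reverse order on failure.
 */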
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sb))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			"Zoned block device support is not enabled\n");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	if (f2fs_sb_has_quota_ino(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi->sb)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;
	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1: NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = f2fs_kmalloc(sbi,
					array_size(n,
						sizeof(struct f2fs_bio_info)),
					GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}
	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);
	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_node_inode;
	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_stats;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;
#ifdef CONFIG_QUOTA
	/*
	 * Turn on quotas that were not enabled for read-only mounts if the
	 * filesystem has the quota feature, so that they are updated
	 * correctly.
	 */
	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err) {
			f2fs_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", err);
			goto free_sysfs;
		}
	}
#endif
	/* if there are any orphan nodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;
	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not completed by a clean shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_meta;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (!retry)
			goto skip_recovery;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_meta;
		}
	}
skip_recovery:
	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * start the GC thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto free_meta;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;
free_meta:
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	f2fs_sync_inode_meta(sbi);
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failing with EIO. Then iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(),
	 * which falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
#ifdef CONFIG_QUOTA
free_sysfs:
#endif
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
free_nm:
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);
	sb->s_fs_info = NULL;

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
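/*
 * f2fs_mount() and kill_f2fs_super() below are the entry points wired up
 * in f2fs_fs_type: mounting delegates to mount_bdev() with
 * f2fs_fill_super(), while kill_sb stops the GC and discard threads and,
 * if the superblock is dirty, writes a final CP_UMOUNT checkpoint before
 * releasing the block device.
 */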
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}
	}
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
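/*
 * The inode cache backs every in-core f2fs_inode_info. It is created with
 * SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT so the objects are treated as
 * reclaimable and charged to the owning memory cgroup; destruction waits
 * for an RCU grace period because inodes are freed via RCU.
 */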
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	return 0;

free_root_stats:
	f2fs_destroy_root_stats();
free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_extent_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");