// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
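/*
 * In the code below, the fsync_mark (F) and dentry_mark (D) above map to
 * the per-node-page bits tested by is_fsync_dnode() and is_dent_dnode().
 */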
static struct kmem_cache *fsync_entry_slab;
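/*
 * Roll forward is only worth attempting while the main area can absorb the
 * replayed blocks; compare the blocks allocated since the last checkpoint
 * against the user-visible block budget.
 */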
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
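/* Look up the fsync_inode_entry for @ino on @head, or return NULL. */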
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
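/*
 * Pin the inode for @ino and track it with a new fsync_inode_entry on
 * @head. Quota state is set up here so that later truncation and iput()
 * during recovery are charged correctly.
 */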
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
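/*
 * Untrack one inode: if @drop, mark it synced first so that the final
 * iput() does not write back the unrecovered in-memory state.
 */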
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
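/*
 * Re-link @inode into its parent directory using the name saved in the raw
 * inode on @ipage. Any stale entry under the same name is unlinked first,
 * and the parent is tracked on @dir_list so directory inodes can be
 * released together once recovery finishes.
 */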
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
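/*
 * If the uid/gid recorded in the raw inode differ from the in-memory
 * inode, transfer the quota charges to the recorded owner.
 */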
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}
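/* Mirror the pin-file and data-exist bits from the raw inode. */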
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}
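/*
 * Scenario 1 above: the node chain holds a newer copy of the inode block,
 * so copy mode, ownership, size, timestamps and flags from the raw inode
 * on @page into the VFS inode.
 */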
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
	return 0;
}
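/*
 * Step #1 of recovery: walk the warm node chain that starts right after
 * the last checkpoint and collect one fsync_inode_entry on @head for every
 * inode that wrote fsync-marked dnodes.
 */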
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_msg(sbi->sb, KERN_NOTICE,
				"%s: detect looped node chain, "
				"blkaddr:%u, next:%u",
				__func__, blkaddr, next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}
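/* Release every entry collected by find_fsync_dnodes(). */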
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}
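/*
 * @blkaddr is about to be reused as a recovery destination. If an older
 * node page still maps this address, locate it through the segment summary
 * and truncate the stale index so the block is not referenced twice.
 */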
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
				block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
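/*
 * Replay one fsync'ed node page for @inode: recover xattrs and inline data
 * first, then walk every data index on @page and move each block back to
 * the address recorded at fsync time.
 */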
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			inode->i_ino, ofs_of_node(dn.node_page),
			ofs_of_node(page));
		err = -EFAULT;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFAULT;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFAULT;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
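/*
 * Entry point, called at mount time. With @check_only set, only step #1
 * runs and a positive return tells the caller that fsync'ed data exists;
 * otherwise the data is replayed and a CP_RECOVERY checkpoint is written.
 */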
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	} else {
		clear_sbi_flag(sbi, SBI_POR_DOING);
	}
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}