/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
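/* slab cache for the fsync inode entries collected during recovery */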
static struct kmem_cache *fsync_entry_slab;
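/*
 * Check that the blocks valid as of the last checkpoint plus the blocks
 * allocated since then still fit within the user-visible block count,
 * i.e. that there is room to roll forward.
 */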
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
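/* find the entry for @ino already queued on @head, if any */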
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
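/*
 * Grab the in-memory inode for @ino, set up its quota state, and queue it
 * on @head so later recovery passes can find it by inode number.
 */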
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
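/*
 * Re-link @inode into its parent directory using the name stored in the
 * raw inode on @ipage; a stale dentry still pointing at another inode is
 * deleted (via the orphan list) before the new link is added.
 */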
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	char *name;
	int err = 0;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}

		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
	if (!(ri->i_inline & F2FS_INLINE_DOTS))
		clear_inode_flag(inode, FI_INLINE_DOTS);
}
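/*
 * Copy the fsynced metadata (mode, size, timestamps, advise and inline
 * flags) from the raw inode on @page into the in-memory @inode.
 */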
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;

	recover_inline_flags(inode, raw);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
}
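/*
 * Pass #1 of roll forward recovery: walk the warm node chain written after
 * the last checkpoint and queue every inode with a fsync-marked dnode on
 * @head. With @check_only, only detect whether recoverable data exists.
 */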
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}
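/*
 * @blkaddr is being reused for recovered data, but an older node page may
 * still index it. Locate that node through the segment summary and truncate
 * the stale index so the block is not referenced twice.
 */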
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
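/*
 * Replay one fsynced node page onto @inode: recover xattr and inline data
 * first, then walk each data index on the page and bring the current block
 * address in line with the fsynced one, reserving or truncating blocks as
 * needed.
 */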
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}
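/*
 * Pass #2 of roll forward recovery: for each inode queued by pass #1,
 * update its in-memory inode, re-link its dentry when the page carries a
 * dentry mark, and replay the per-page data block updates.
 */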
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
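/*
 * Mount-time entry point for roll forward recovery. Returns 1 when
 * @check_only found recoverable data, 0 on success, or a negative errno;
 * a checkpoint is written once replay has actually modified the filesystem.
 */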
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
skip:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (!err && need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		err = write_checkpoint(sbi, &cpc);
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}