/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
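/*
 * Recovery runs in two passes over the warm node chain written after the
 * last checkpoint: find_fsync_dnodes() collects the fsynced inodes and
 * recover_data() replays their dnodes; on success a CP_RECOVERY checkpoint
 * makes the result durable.
 *
 * Each tracked inode is described by a fsync_inode_entry allocated from
 * fsync_entry_slab below.  The entry is declared in f2fs.h, roughly:
 *
 *	struct fsync_inode_entry {
 *		struct list_head list;	// link on the per-pass inode list
 *		struct inode *inode;	// vfs inode pointer
 *		block_t blkaddr;	// block address of the last fsync dnode
 *		block_t last_dentry;	// block address of the last dentry block
 *	};
 */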
static struct kmem_cache *fsync_entry_slab;
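/*
 * Check that replaying the blocks written since the last checkpoint cannot
 * exceed the user-visible block count.
 */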
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
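/* Look up an already-tracked inode on @head by its inode number. */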
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
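/*
 * Grab the in-memory inode via f2fs_iget_retry(), set up its quota state
 * and add a new tracking entry for it to @head.
 */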
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
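/* Drop the inode reference and free the tracking entry. */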
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
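/*
 * Re-link @inode into its parent directory using the name stored in the
 * raw inode on @ipage; a stale entry with the same name is deleted first.
 */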
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}

		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
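/*
 * Copy the post-checkpoint on-disk inode fields (mode, size, timestamps,
 * advise flags) back into the in-memory inode.
 */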
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}
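/*
 * Pass 1: walk the warm node chain starting at the first block written
 * after the last checkpoint and build the list of fsynced inodes.
 */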
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
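/* Release every remaining entry on the list. */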
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}
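/*
 * @blkaddr is about to be reused for recovered data.  Find the node that
 * previously referenced this block (via the segment summary) and truncate
 * its stale index so the block is not referenced twice.
 */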
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
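/*
 * Replay a single fsynced dnode: restore xattrs and inline data, then walk
 * every data index on the page and make the on-disk block addresses match
 * what was written after the checkpoint.
 */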
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = recover_xattr_data(inode, page, blkaddr);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}
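/*
 * Pass 2: walk the warm node chain again and, for every block that belongs
 * to a tracked inode, replay the inode metadata, dentry and data indices.
 */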
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
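/*
 * Entry point for roll-forward recovery, called at mount time.  When
 * check_only is true it only reports, via a positive return value, whether
 * there is anything to recover, without replaying the data.
 */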
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;

	if (s_flags & MS_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~MS_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= MS_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	f2fs_enable_quota_files(sbi);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
skip:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (!err && need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		err = write_checkpoint(sbi, &cpc);
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */

	return ret ? ret : err;
}