/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
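
/*
 * Overall flow (see recover_fsync_data() below):
 *
 * step #1: find_fsync_dnodes() walks the warm node chain written after the
 *          last checkpoint and collects every fsync'ed inode into a list.
 * step #2: recover_data() replays each logged dnode for those inodes; on
 *          success a checkpoint is written to make the result durable.
 */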
static struct kmem_cache *fsync_entry_slab;
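
/*
 * Check whether enough free space remains to replay the fsync'ed data
 * on top of the last checkpointed block count.
 */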
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
        if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
                        > sbi->user_block_count)
                return false;
        return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                                nid_t ino)
{
        struct fsync_inode_entry *entry;

        list_for_each_entry(entry, head, list)
                if (entry->inode->i_ino == ino)
                        return entry;

        return NULL;
}
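
/*
 * Re-link a recovered inode into its parent directory (found via i_pino in
 * the raw inode), replacing any stale entry that points elsewhere.
 */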
static int recover_dentry(struct inode *inode, struct page *ipage)
{
        struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
        struct qstr name;
        struct page *page;
        struct inode *dir, *einode;
        int err = 0;

        dir = f2fs_iget(inode->i_sb, pino);
        if (IS_ERR(dir)) {
                err = PTR_ERR(dir);
                goto out;
        }

        name.len = le32_to_cpu(raw_inode->i_namelen);
        name.name = raw_inode->i_name;

        if (unlikely(name.len > F2FS_NAME_LEN)) {
                WARN_ON(1);
                err = -ENAMETOOLONG;
                goto out_err;
        }
retry:
        de = f2fs_find_entry(dir, &name, &page);
        if (de && inode->i_ino == le32_to_cpu(de->ino)) {
                clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
                goto out_unmap_put;
        }
        if (de) {
                einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        WARN_ON(1);
                        err = PTR_ERR(einode);
                        if (err == -ENOENT)
                                err = -EEXIST;
                        goto out_unmap_put;
                }
                err = acquire_orphan_inode(F2FS_I_SB(inode));
                if (err) {
                        iput(einode);
                        goto out_unmap_put;
                }
                f2fs_delete_entry(de, page, dir, einode);
                iput(einode);
                goto retry;
        }
        err = __f2fs_add_link(dir, &name, inode);
        if (err)
                goto out_err;

        if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
                iput(dir);
        } else {
                add_dirty_dir_inode(dir);
                set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
        }

        goto out;

out_unmap_put:
        f2fs_dentry_kunmap(dir, page);
        f2fs_put_page(page, 0);
out_err:
        iput(dir);
out:
        f2fs_msg(inode->i_sb, KERN_NOTICE,
                        "%s: ino = %x, name = %s, dir = %lx, err = %d",
                        __func__, ino_of_node(ipage), raw_inode->i_name,
                        IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
}
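
/*
 * Bring the in-memory inode up to date with the raw inode image found in
 * the logged node page (size, mode and timestamps).
 */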
static void recover_inode(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);

        inode->i_mode = le16_to_cpu(raw->i_mode);
        i_size_write(inode, le64_to_cpu(raw->i_size));
        inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
                        ino_of_node(page), F2FS_INODE(page)->i_name);
}
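
/*
 * Step #1 of recovery: scan the warm node log written after the last
 * checkpoint, following next_blkaddr_of_node(), and collect every inode
 * that has an fsync-marked dnode into @head.
 */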
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
        unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
        struct curseg_info *curseg;
        struct page *page = NULL;
        block_t blkaddr;
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        ra_meta_pages(sbi, blkaddr, 1, META_POR);

        while (1) {
                struct fsync_inode_entry *entry;

                if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
                        return 0;

                page = get_meta_page(sbi, blkaddr);

                if (cp_ver != cpver_of_node(page))
                        break;

                if (!is_fsync_dnode(page))
                        goto next;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (entry) {
                        if (IS_INODE(page) && is_dent_dnode(page))
                                set_inode_flag(F2FS_I(entry->inode),
                                                        FI_INC_LINK);
                } else {
                        if (IS_INODE(page) && is_dent_dnode(page)) {
                                err = recover_inode_page(sbi, page);
                                if (err)
                                        break;
                        }

                        /* add this fsync inode to the list */
                        entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
                        if (!entry) {
                                err = -ENOMEM;
                                break;
                        }
                        /*
                         * CP | dnode(F) | inode(DF)
                         * For this case, we should not give up now.
                         */
                        entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
                        if (IS_ERR(entry->inode)) {
                                err = PTR_ERR(entry->inode);
                                kmem_cache_free(fsync_entry_slab, entry);
                                if (err == -ENOENT)
                                        goto next;
                                break;
                        }
                        list_add_tail(&entry->list, head);
                }
                entry->blkaddr = blkaddr;

                if (IS_INODE(page)) {
                        entry->last_inode = blkaddr;
                        if (is_dent_dnode(page))
                                entry->last_dentry = blkaddr;
                }
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);

                ra_meta_pages_cond(sbi, blkaddr);
        }
        f2fs_put_page(page, 1);
        return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
        struct fsync_inode_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, list) {
                iput(entry->inode);
                list_del(&entry->list);
                kmem_cache_free(fsync_entry_slab, entry);
        }
}
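
/*
 * A block address we are about to reuse may still be referenced by an older
 * node written before the crash.  Find that previous owner through the
 * segment summary and invalidate its stale index, so the block is not
 * claimed twice.
 */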
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                                block_t blkaddr, struct dnode_of_data *dn)
{
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
        struct f2fs_summary_block *sum_node;
        struct f2fs_summary sum;
        struct page *sum_page, *node_page;
        nid_t ino, nid;
        struct inode *inode;
        unsigned int offset;
        block_t bidx;
        int i;

        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
                return 0;

        /* Get the previous summary */
        for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);
                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
                        goto got_it;
                }
        }

        sum_page = get_sum_page(sbi, segno);
        sum_node = (struct f2fs_summary_block *)page_address(sum_page);
        sum = sum_node->entries[blkoff];
        f2fs_put_page(sum_page, 1);
got_it:
        /* Use the locked dnode page and inode */
        nid = le32_to_cpu(sum.nid);
        if (dn->inode->i_ino == nid) {
                struct dnode_of_data tdn = *dn;
                tdn.nid = nid;
                tdn.node_page = dn->inode_page;
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                truncate_data_blocks_range(&tdn, 1);
                return 0;
        } else if (dn->nid == nid) {
                struct dnode_of_data tdn = *dn;
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                truncate_data_blocks_range(&tdn, 1);
                return 0;
        }

        /* Get the node page */
        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        offset = ofs_of_node(node_page);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);

        if (ino != dn->inode->i_ino) {
                /* Deallocate previous index in the node page */
                inode = f2fs_iget(sbi->sb, ino);
                if (IS_ERR(inode))
                        return PTR_ERR(inode);
        } else {
                inode = dn->inode;
        }

        bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
                                le16_to_cpu(sum.ofs_in_node);

        if (ino != dn->inode->i_ino) {
                truncate_hole(inode, bidx, bidx + 1);
                iput(inode);
        } else {
                struct dnode_of_data tdn;
                set_new_dnode(&tdn, inode, dn->inode_page, NULL, 0);
                if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
                        return 0;
                if (tdn.data_blkaddr != NULL_ADDR)
                        truncate_data_blocks_range(&tdn, 1);
                f2fs_put_page(tdn.node_page, 1);
        }
        return 0;
}
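
/*
 * Replay one fsync'ed dnode: copy every newer data block address from the
 * logged node page into the current dnode, reserving blocks and fixing up
 * stale owners on the way, then rewrite the node page in place.
 */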
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page, block_t blkaddr)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int start, end;
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        int err = 0, recovered = 0;

        /* step 1: recover xattr */
        if (IS_INODE(page)) {
                recover_inline_xattr(inode, page);
        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
                /*
                 * Deprecated; xattr blocks should be found from cold log.
                 * But, we should remain this for backward compatibility.
                 */
                recover_xattr_data(inode, page, blkaddr);
                goto out;
        }

        /* step 2: recover inline data */
        if (recover_inline_data(inode, page))
                goto out;

        /* step 3: recover data indices */
        start = start_bidx_of_node(ofs_of_node(page), fi);
        end = start + ADDRS_PER_PAGE(page, fi);

        f2fs_lock_op(sbi);

        set_new_dnode(&dn, inode, NULL, NULL, 0);

        err = get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                f2fs_unlock_op(sbi);
                goto out;
        }

        f2fs_wait_on_page_writeback(dn.node_page, NODE);

        get_node_info(sbi, dn.nid, &ni);
        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
        f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

        for (; start < end; start++) {
                block_t src, dest;

                src = datablock_addr(dn.node_page, dn.ofs_in_node);
                dest = datablock_addr(page, dn.ofs_in_node);

                if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
                        if (src == NULL_ADDR) {
                                err = reserve_new_block(&dn);
                                /* We should not get -ENOSPC */
                                f2fs_bug_on(sbi, err);
                        }

                        /* Check the previous node page having this index */
                        err = check_index_in_prev_nodes(sbi, dest, &dn);
                        if (err)
                                goto err;

                        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

                        /* write dummy data page */
                        recover_data_page(sbi, NULL, &sum, src, dest);
                        dn.data_blkaddr = dest;
                        update_extent_cache(&dn);
                        recovered++;
                }
                dn.ofs_in_node++;
        }

        /* write node page in place */
        set_summary(&sum, dn.nid, 0, 0);
        if (IS_INODE(dn.node_page))
                sync_inode_page(&dn);

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
err:
        f2fs_put_dnode(&dn);
        f2fs_unlock_op(sbi);
out:
        f2fs_msg(sbi->sb, KERN_NOTICE,
                "recover_data: ino = %lx, recovered = %d blocks, err = %d",
                inode->i_ino, recovered, err);
        return err;
}
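
/*
 * Step #2 of recovery: walk the node chain again and, for every block that
 * belongs to an inode collected in step #1, recover its inode metadata,
 * its dentry if needed, and its data block indices.
 */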
static int recover_data(struct f2fs_sb_info *sbi,
                                struct list_head *head, int type)
{
        unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
        struct curseg_info *curseg;
        struct page *page = NULL;
        int err = 0;
        block_t blkaddr;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, type);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
                        break;

                ra_meta_pages_cond(sbi, blkaddr);

                page = get_meta_page(sbi, blkaddr);

                if (cp_ver != cpver_of_node(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry)
                        goto next;
                /*
                 * inode(x) | CP | inode(x) | dnode(F)
                 * In this case, we can lose the latest inode(x).
                 * So, call recover_inode for the inode update.
                 */
                if (entry->last_inode == blkaddr)
                        recover_inode(entry->inode, page);
                if (entry->last_dentry == blkaddr) {
                        err = recover_dentry(entry->inode, page);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                err = do_recover_data(sbi, entry->inode, page, blkaddr);
                if (err) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (entry->blkaddr == blkaddr) {
                        iput(entry->inode);
                        list_del(&entry->list);
                        kmem_cache_free(fsync_entry_slab, entry);
                }
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);
        }
        if (!err)
                allocate_new_segments(sbi);
        return err;
}
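
/*
 * Entry point of roll-forward recovery, typically invoked while mounting
 * after a sudden power cut.  Checkpointing is blocked for the whole run;
 * on success a checkpoint is written, on failure the half-recovered state
 * is invalidated and CP_ERROR_FLAG is set.
 */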
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        struct list_head inode_list;
        block_t blkaddr;
        int err;
        bool need_writecp = false;

        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                        sizeof(struct fsync_inode_entry));
        if (!fsync_entry_slab)
                return -ENOMEM;

        INIT_LIST_HEAD(&inode_list);

        /* step #1: find fsynced inode numbers */
        set_sbi_flag(sbi, SBI_POR_DOING);

        /* prevent checkpoint */
        mutex_lock(&sbi->cp_mutex);

        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        err = find_fsync_dnodes(sbi, &inode_list);
        if (err)
                goto out;

        if (list_empty(&inode_list))
                goto out;

        need_writecp = true;

        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
        if (!err)
                f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
        destroy_fsync_dnodes(&inode_list);
        kmem_cache_destroy(fsync_entry_slab);

        /* truncate meta pages to be used by the recovery */
        truncate_inode_pages_range(META_MAPPING(sbi),
                        MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

        if (err) {
                truncate_inode_pages_final(NODE_MAPPING(sbi));
                truncate_inode_pages_final(META_MAPPING(sbi));
        }

        clear_sbi_flag(sbi, SBI_POR_DOING);
        if (err) {
                discard_next_dnode(sbi, blkaddr);

                /* Flush all the NAT/SIT pages */
                while (get_pages(sbi, F2FS_DIRTY_META))
                        sync_meta_pages(sbi, META, LONG_MAX);
                set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
                mutex_unlock(&sbi->cp_mutex);
        } else if (need_writecp) {
                struct cp_control cpc = {
                        .reason = CP_SYNC,
                };
                mutex_unlock(&sbi->cp_mutex);
                write_checkpoint(sbi, &cpc);
        } else {
                mutex_unlock(&sbi->cp_mutex);
        }
        return err;
}