/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;
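
/*
 * There is room to roll forward only if the blocks valid at the last
 * checkpoint plus the blocks allocated since then still fit within the
 * user-visible block count.
 */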
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}
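
/* Look up the fsync_inode_entry for @ino in the recovery list, if any. */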
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
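
/*
 * Re-link a recovered inode into its parent directory. If a stale
 * entry with the same name points at a different inode, that inode is
 * handed to the orphan mechanism, its entry is deleted, and the lookup
 * is retried before the new link is added.
 */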
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	if (file_enc_name(inode)) {
		iput(dir);
		return 0;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
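
/*
 * Copy the mode, size and timestamps from the on-disk inode image in
 * @page into the in-memory inode.
 */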
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}
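
/*
 * Walk the warm node chain written after the last checkpoint and build
 * a list of inodes carrying fsync marks, recording for each one the
 * last dnode block seen and, where present, the last inode and dentry
 * blocks.
 */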
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
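
/* Release every entry on the recovery list along with its inode reference. */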
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
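
/*
 * A destination block about to be recovered may still be referenced by
 * an index in a previously written node page. Find that owner through
 * the segment summary and truncate the stale index so the block can be
 * reused safely.
 */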
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; the
	 * reference count on it is still held.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
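
/*
 * Replay one fsynced node page: recover xattrs and inline data first,
 * then walk the block addresses it records and make the on-disk dnode
 * point at the fsynced destination blocks.
 */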
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct node_info ni;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from the cold
		 * log. But we keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			err = reserve_new_block(&dn);
			f2fs_bug_on(sbi, err);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false);
			recovered++;
		}
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}
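
/*
 * Second pass over the warm node chain: for every block belonging to a
 * listed inode, replay the inode, dentry and data updates in order,
 * dropping each entry once its last recorded block has been replayed.
 */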
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
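
/*
 * Entry point of roll forward recovery, called at mount time: collect
 * the fsynced inodes, replay their data, and either write a checkpoint
 * on success or mark the checkpoint erroneous on failure.
 */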
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (discard_next_dnode(sbi, blkaddr))
			invalidate = true;

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}
	return err;
}