/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;
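
/*
 * Return true if the blocks allocated since the last checkpoint still fit
 * within the user-visible block count, i.e. there is room to roll forward.
 */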
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}
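
/* Find the entry in @head whose in-memory inode has inode number @ino. */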
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
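
/*
 * Re-link a fsynced inode under its parent directory.  A stale entry that
 * holds the same name but a different inode number is removed through the
 * orphan mechanism before the link is added again.
 */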
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	kunmap(page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
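
/*
 * Restore mode, size and timestamps from the on-disk inode image, and
 * recover the directory entry as well when this node page carries one.
 */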
static int recover_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(node_page);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}
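
/*
 * Step #1 of recovery: follow the chain of warm node blocks written after
 * the last checkpoint and collect, in @head, every inode that has a dnode
 * marked for fsync.
 */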
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	return err;
}
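
/* Drop the inode references and free every entry left on the list. */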
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
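
/*
 * @blkaddr is about to be reused for a recovered data block.  If an older
 * node still maps this address, truncate that stale index first so the
 * block does not end up referenced twice.
 */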
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;

		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;

		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}
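
/*
 * Replay the block addresses recorded in the fsynced node page @page into
 * the matching live dnode, then rewrite the node page in place.
 */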
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	if (recover_inline_data(inode, page))
		goto out;

	if (recover_xattr_data(inode, page, blkaddr))
		goto out;

	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(ni.ino != ino_of_node(page));
	f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}
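
/*
 * Step #2 of recovery: walk the node chain again and replay the data of
 * every inode collected in @head.
 */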
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}
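
/*
 * Entry point of roll-forward recovery: find the fsynced inodes, replay
 * their data, and write a checkpoint if anything was recovered.
 */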
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = true;
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	f2fs_bug_on(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	sbi->por_doing = false;
	if (!err && need_writecp)
		write_checkpoint(sbi, false);
	return err;
}