/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
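
/*
 * Check whether the in-memory footprint of the given cache type (free nids,
 * NAT entries, or dirty dentry pages) is still within its share of the
 * ram_thresh percentage of total RAM.
 */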
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* give 25%, 25%, 50% of memory to each component, respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
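
/*
 * Copy the current NAT block to the alternate NAT block location and flip
 * the NAT version bit, so that checkpoint writes go to the new copy.
 */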
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}
bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}
void fsync_mark_clear(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		e->fsync_done = false;
	write_unlock(&nm_i->nat_tree_lock);
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&e->ni, ne);
	}
	write_unlock(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version number as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always returns success: the node_info is filled from the
 * NAT cache, the NAT journal, or the on-disk NAT block, in that order.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
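
/*
 * Illustrative lookup sequence for a data block address (a sketch, not a
 * fixed API contract; error handling abbreviated):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 */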
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if ro is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
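
/*
 * Recursively truncate the node blocks below an (double) indirect node and
 * return the number of node blocks freed, so the caller can advance its
 * node offset accordingly.
 */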
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
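
/*
 * Free the direct nodes that hang off a partially truncated (double)
 * indirect chain, walking down through at most two in-path indirect pages.
 */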
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() fails */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}
struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}
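
/*
 * Allocate and initialize a new node page for dn->nid: charge the node
 * count, point the NAT entry at NEW_ADDR, fill the node footer, and return
 * the locked, dirty page.
 */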
struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * The caller should do the following after getting one of these return
 * values:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	return page;
}
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode,
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	down_read(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr, false);
	clear_node_page_dirty(page);
}
void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}
bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
	return true;
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}
/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	struct page *pages[bio_blocks];
	int i, idx, last_offset, nrpages, err = 0;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		for (idx = 0; idx < nrpages; idx++) {
			if (err)
				goto skip;

			lock_page(pages[idx]);
			if (unlikely(!PageUptodate(pages[idx]))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(pages[idx]);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(pages[idx]);
skip:
			page_cache_release(pages[idx]);
		}

		invalidate_mapping_pages(inode->i_mapping, addr,
							addr + nrpages);
	}
	return err;
}
static struct nat_entry_set *grab_nat_entry_set(void)
{
	struct nat_entry_set *nes =
			f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

	nes->entry_cnt = 0;
	INIT_LIST_HEAD(&nes->set_list);
	INIT_LIST_HEAD(&nes->entry_list);
	return nes;
}

static void release_nat_entry_set(struct nat_entry_set *nes,
						struct f2fs_nm_info *nm_i)
{
	f2fs_bug_on(!list_empty(&nes->entry_list));

	nm_i->dirty_nat_cnt -= nes->entry_cnt;
	list_del(&nes->set_list);
	kmem_cache_free(nat_entry_set_slab, nes);
}
static void adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head)
{
	struct nat_entry_set *next = nes;

	if (list_is_last(&nes->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (nes->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&nes->set_list, &next->set_list);
}
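
/*
 * Insert a dirty NAT entry into the per-NAT-block set it belongs to,
 * keeping the set list ordered by entry count (see adjust_nat_entry_set()).
 */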
static void add_nat_entry(struct nat_entry *ne, struct list_head *head)
{
	struct nat_entry_set *nes;
	nid_t start_nid = START_NID(ne->ni.nid);

	list_for_each_entry(nes, head, set_list) {
		if (nes->start_nid == start_nid) {
			list_move_tail(&ne->list, &nes->entry_list);
			nes->entry_cnt++;
			adjust_nat_entry_set(nes, head);
			return;
		}
	}

	nes = grab_nat_entry_set();

	nes->start_nid = start_nid;
	list_move_tail(&ne->list, &nes->entry_list);
	nes->entry_cnt++;
	list_add(&nes->set_list, head);
}
static void merge_nats_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct list_head *dirty_list = &nm_i->dirty_nat_entries;
	struct list_head *set_list = &nm_i->nat_entry_set;
	struct nat_entry *ne, *tmp;

	write_lock(&nm_i->nat_tree_lock);
	list_for_each_entry_safe(ne, tmp, dirty_list, list) {
		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		add_nat_entry(ne, set_list);
		nm_i->dirty_nat_cnt++;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
static bool __has_cursum_space(struct f2fs_summary_block *sum, int size)
{
	if (nats_in_cursum(sum) + size <= NAT_JOURNAL_ENTRIES)
		return true;
	return false;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne)
			goto found;

		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&ne->ni, &raw_ne);
found:
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}
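
/*
 * Flush every dirty NAT entry either into the NAT journal of the hot data
 * summary block (while it has room) or into its on-disk NAT block, set by
 * set, and release the now-empty entry sets.
 */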
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *nes, *tmp;
	struct list_head *head = &nm_i->nat_entry_set;
	bool to_journal = true;

	/* merge nat entries of dirty list to nat entry set temporarily */
	merge_nats_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt)) {
		remove_nats_in_journal(sbi);

		/*
		 * merge nat entries of dirty list to nat entry set temporarily
		 */
		merge_nats_in_set(sbi);
	}

	if (!nm_i->dirty_nat_cnt)
		return;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	list_for_each_entry_safe(nes, tmp, head, set_list) {
		struct f2fs_nat_block *nat_blk;
		struct nat_entry *ne, *cur;
		struct page *page;
		nid_t start_nid = nes->start_nid;

		if (to_journal && !__has_cursum_space(sum, nes->entry_cnt))
			to_journal = false;

		if (to_journal) {
			mutex_lock(&curseg->curseg_mutex);
		} else {
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
			f2fs_bug_on(!nat_blk);
		}

		/* flush dirty nats in nat entry set */
		list_for_each_entry_safe(ne, cur, &nes->entry_list, list) {
			struct f2fs_nat_entry *raw_ne;
			nid_t nid = nat_get_nid(ne);
			int offset;

			if (to_journal) {
				offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
				f2fs_bug_on(offset < 0);
				raw_ne = &nat_in_journal(sum, offset);
				nid_in_journal(sum, offset) = cpu_to_le32(nid);
			} else {
				raw_ne = &nat_blk->entries[nid - start_nid];
			}
			raw_nat_from_node_info(raw_ne, &ne->ni);

			if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(sbi, nid, false) <= 0) {
				write_lock(&nm_i->nat_tree_lock);
				__del_from_nat_cache(nm_i, ne);
				write_unlock(&nm_i->nat_tree_lock);
			} else {
				write_lock(&nm_i->nat_tree_lock);
				__clear_nat_cache_dirty(nm_i, ne);
				write_unlock(&nm_i->nat_tree_lock);
			}
		}

		if (to_journal)
			mutex_unlock(&curseg->curseg_mutex);
		else
			f2fs_put_page(page, 1);

		release_nat_entry_set(nes, nm_i);
	}

	f2fs_bug_on(!list_empty(head));
	f2fs_bug_on(nm_i->dirty_nat_cnt);
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - 3;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
	INIT_LIST_HEAD(&nm_i->nat_entry_set);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned int idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destory_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destory_free_nid;
	return 0;

destory_free_nid:
	kmem_cache_destroy(free_nid_slab);
destory_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}
void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}