/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
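
/*
 * NAT blocks live in two on-disk copies that alternate across checkpoints:
 * get_next_nat_page() above seeds the inactive copy from the current one and
 * marks it dirty, while set_to_next_nat() flips the per-nid bit in the NAT
 * version bitmap so the updated copy is the one consulted after the next
 * checkpoint.
 */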
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}
		f2fs_submit_page_mbio(sbi, page, index, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
	f2fs_submit_merged_bio(sbi, META, READ);
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
						block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * previous nat entry can be remained in nat cache.
		 * So, reinitialize it with new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
				offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
				offset[n - 2] * (dptrs_per_blk + 1) +
				offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
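
/*
 * Worked example (assuming the usual on-disk geometry, i.e.
 * ADDRS_PER_INODE(fi) == 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 * for file block 923 + 2 * 1018 + 3 * 1018 + 7, the three subtractions above
 * leave block == 3 * 1018 + 7, which falls in the first indirect range, so
 * the function returns level 2 with offset[] = { NODE_IND1_BLOCK, 3, 7 }:
 * entry 3 of the indirect node names the direct node, and slot 7 of that
 * direct node holds the data block address.
 */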
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() unless the mode is RDONLY_NODE; in the RDONLY_NODE case,
 * we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
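
/*
 * Typical use of the dnode API above (an illustrative sketch only): a caller
 * that wants the block address of file block 'index' does roughly
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 *
 * with ALLOC_NODE used instead when missing node blocks should be created.
 */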
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			wait_on_page_writeback(page);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
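
/*
 * Note that truncate_dnode() and truncate_nodes() return the number of node
 * blocks they freed, so a positive 'err' in truncate_inode_blocks() means
 * success and is folded to 0 above; only negative values are propagated as
 * real errors.
 */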
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() is failed */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}
struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}
struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR);

	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (ofs == XATTR_NODE_OFFSET)
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Callers should take the following actions for each return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	f2fs_bug_on(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file (cold) dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				wait_on_page_writeback(page);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	wbc->pages_skipped++;
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}
/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB) * 3 node types is a reasonable amount.
 */
#define COLLECT_DIRTY_NODES	1536
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long nr_to_write = wbc->nr_to_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting is failed, skip writing node pages */
	wbc->nr_to_write = 3 * max_hw_blocks(sbi);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
						wbc->nr_to_write);
	return 0;
}
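
/*
 * The nr_to_write arithmetic above works as follows: sync_node_pages() was
 * handed a budget of 3 * max_hw_blocks(sbi) and decrements wbc->nr_to_write
 * for every page it writes, so (3 * max_hw_blocks(sbi) - wbc->nr_to_write)
 * is the number of pages actually written; that amount is then charged
 * against the caller's original budget.
 */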
static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;

	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}
static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid, true) < 0)
				break;
		}
	}
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode is
 * created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !sbi->on_build_free_nids) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	sbi->on_build_free_nids = true;
	build_free_nids(sbi);
	sbi->on_build_free_nids = false;
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
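
/*
 * Illustrative pairing of the nid allocation API (a sketch, not lifted from
 * a specific caller): a nid obtained here must be committed with
 * alloc_nid_done() once the new node page is safely created, or handed back
 * with alloc_nid_failed() on any error path, e.g.
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	dn->nid = nid;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page)) {
 *		alloc_nid_failed(sbi, nid);
 *		return PTR_ERR(page);
 *	}
 *	alloc_nid_done(sbi, nid);
 *
 * which mirrors the pattern used in get_dnode_of_data() above.
 */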
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
		__del_from_free_nid_list(i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}
/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are linked in the pages list.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
				int start, int nrpages)
{
	struct page *page;
	int page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; page_idx < start + nrpages; page_idx++) {
		/* alloc a temporary page to read node summary info */
		page = alloc_page(GFP_F2FS_ZERO);
		if (!page) {
			struct page *tmp;
			list_for_each_entry_safe(page, tmp, pages, lru) {
				list_del(&page->lru);
				unlock_page(page);
				__free_pages(page, 0);
			}
			return -ENOMEM;
		}

		lock_page(page);
		page->index = page_idx;
		list_add_tail(&page->lru, pages);
	}

	list_for_each_entry(page, pages, lru)
		f2fs_submit_page_mbio(sbi, page, page->index, &fio);

	f2fs_submit_merged_bio(sbi, META, READ);
	return 0;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page, *tmp;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	int i, last_offset, nrpages, err = 0;
	LIST_HEAD(page_list);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
		if (err)
			return err;

		list_for_each_entry_safe(page, tmp, &page_list, lru) {

			lock_page(page);
			if (unlikely(!PageUptodate(page))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(page);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}

			list_del(&page->lru);
			unlock_page(page);
			__free_pages(page, 0);
		}
	}
	return err;
}
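
/*
 * flush_nats_in_journal() below handles the case where the NAT journal kept
 * in the hot-data curseg summary block has filled up: every journalled entry
 * is pulled back into the in-memory nat cache and marked dirty, so that
 * flush_nat_entries() writes it into the NAT blocks proper instead of the
 * journal.
 */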
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and lock
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		f2fs_bug_on(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(NM_I(sbi), nid, false) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}