// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;
/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% of memory to each component, respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* allow at most 20% of total_ram for in-memory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
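
/*
 * Worked example (illustrative numbers, not taken from any caller): with
 * avail_ram = 1,000,000 low-memory pages and ram_thresh = 10, the FREE_NIDS
 * and NAT_ENTRIES caches may each grow to (1000000 * 10 / 100) >> 2 = 25,000
 * pages' worth of entries, while the 50% classes (DIRTY_DENTS, INO_ENTRIES,
 * EXTENT_CACHE) are allowed twice that, (1000000 * 10 / 100) >> 1 = 50,000.
 */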
static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}
static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}
/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the LRU list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}
static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the conditions below:
	 * 1. a NEW_ADDR entry is updated to a valid block address;
	 * 2. an old block address is updated to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}
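
/*
 * The returned seq_id lets an fsync thread wait for exactly the node pages
 * it queued: __write_node_page() below records the id for each warm dnode
 * it submits, and f2fs_wait_on_node_pages_writeback() drains the
 * fsync_node_list up to that id.
 */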
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}
/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry may
		 * remain in the nat cache. So, reinitialize it with the
		 * new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}
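
/*
 * Usage sketch (hypothetical caller): resolving a nid follows three tiers --
 * the in-memory nat cache, the hot-data curseg journal, then the on-disk
 * NAT block -- and the result is cached for next time.
 *
 *	struct node_info ni;
 *
 *	err = f2fs_get_node_info(sbi, nid, &ni);
 *	if (!err)
 *		blkaddr = ni.blk_addr;
 */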
/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
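
/*
 * Illustration (assuming the default 4KB-block geometry where
 * ADDRS_PER_BLOCK() == 1018): if a lookup at cur_level failed under a
 * missing direct node, the next candidate offset is pgofs rounded up to the
 * next multiple of 1018 past base, skipping the whole address range the
 * absent node would have covered.
 */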
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
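
/*
 * Worked example (illustrative, assuming the default geometry where
 * ADDRS_PER_INODE() == 923 and NIDS_PER_BLOCK == 1018): block 922 lives in
 * the inode itself (level 0); block 923 is the first slot of NODE_DIR1_BLOCK
 * (level 1, offset[1] = 0); block 923 + 2*1018 = 2959 is the first block
 * reached through NODE_IND1_BLOCK (level 2).
 */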
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
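
/*
 * Usage sketch (hypothetical caller): look up the on-disk address of a file
 * block without allocating anything on a miss.
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, blk_index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */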
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
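
/*
 * Note on the return convention above: a fully freed direct node reports 1
 * (itself), and a fully freed indirect subtree reports NIDS_PER_BLOCK + 1
 * (all children plus the node itself), which is how the caller recognizes
 * that the corresponding slot in the parent can be zeroed.
 */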
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}
struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Callers should release the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}
/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}
struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
			ClearPageUptodate(page);
			dec_page_count(sbi, F2FS_DIRTY_NODES);
			unlock_page(page);
			return 0;
		}
		goto redirty_out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
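
/*
 * Note: the atomic flag above is how the fsync path (f2fs_fsync_node_pages
 * below, which passes atomic only for the last dnode) requests
 * REQ_PREFLUSH | REQ_FUA, making a strict fsync durable without a full
 * checkpoint; regular writeback passes false.
 */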
int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}
static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}
static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	int nr_pages;

	pagevec_init(&pvec);

	while ((nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (!IS_DNODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;
			bool may_dirty = true;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && may_dirty) {
				may_dirty = false;
				if (flush_dirty_inode(page))
					goto lock_node;
			}
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		ret = -EIO;
	return ret;
}
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		f2fs_set_page_private(page, 0);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
							nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}
static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);

	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}
static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}
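
/*
 * Indexing example (illustrative): each NAT block covers NAT_ENTRY_PER_BLOCK
 * nids (455 with 4KB blocks and 9-byte raw entries), so nid 1000 maps to
 * nat_ofs = 1000 / 455 = 2 and nid_ofs = 1000 % 455 = 90: bit 90 of the
 * third per-block free-nid bitmap.
 */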
/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *   - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}
static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}
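
/*
 * In other words, a NULL_ADDR slot in the on-disk NAT means the nid is
 * unallocated and becomes a free-nid candidate, while NEW_ADDR (allocated
 * but never checkpointed) should never appear on disk, so the scan treats
 * it as corruption and the caller reports "NAT is corrupt" below.
 */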
static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}
static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);
}
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				up_read(&nm_i->nat_tree_lock);
				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as the ino as well as the nid when an
 * inode is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
		return false;
	}

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}
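
/*
 * Usage sketch: every successful f2fs_alloc_nid() must be closed out with
 * exactly one of f2fs_alloc_nid_done() (the nid was consumed) or
 * f2fs_alloc_nid_failed() (the nid goes back to the free list), as the
 * allocation path in f2fs_get_dnode_of_data() above does:
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	...
 *	if (failed)
 *		f2fs_alloc_nid_failed(sbi, nid);
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);
 */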
/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}

	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);

	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	return 0;
}
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	return 0;
}
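/*
 * Illustrative sketch, not part of f2fs: the memcpy() in
 * f2fs_recover_inode_page() copies only the inode fields laid out before
 * i_ext; the pointer arithmetic is offsetof() spelled out by hand.
 */
#if 0
#include <stddef.h>

/* (unsigned long)&src->i_ext - (unsigned long)src
 *	== offsetof(struct f2fs_inode, i_ext) */
#define FIXED_INODE_AREA_SIZE	offsetof(struct f2fs_inode, i_ext)
#endif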
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, BIO_MAX_PAGES);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}
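/*
 * Illustrative sketch, not part of f2fs: f2fs_restore_node_summary() walks
 * the segment in BIO_MAX_PAGES-sized windows, prefetching each window before
 * consuming its pages one by one. A userspace model of the same windowing;
 * the callbacks are hypothetical.
 */
#if 0
static void walk_in_windows(unsigned long start, int total, int window_max,
			    void (*readahead)(unsigned long, int),
			    void (*visit)(unsigned long))
{
	unsigned long addr = start, p;
	int i, n;

	for (i = 0; i < total; i += n, addr += n) {
		n = total - i < window_max ? total - i : window_max;
		readahead(addr, n);		/* prefetch a whole window */
		for (p = addr; p < addr + n; p++)
			visit(p);		/* then consume page by page */
	}
}
#endif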
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in the journal has not been used since the
		 * last checkpoint, remove it from the available nids, since
		 * we will add it again later.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}
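/*
 * Illustrative sketch, not part of f2fs: __adjust_nat_entry_set() keeps the
 * dirty-set list sorted by ascending entry_cnt, with sets too large for the
 * journal appended at the tail. The same policy over a plain array:
 */
#if 0
/* Insert val into sorted[0..*n) kept ascending; val >= max goes last. */
static void sorted_insert(int *sorted, int *n, int val, int max)
{
	int pos = *n;
	int j;

	if (val < max)
		for (pos = 0; pos < *n && sorted[pos] < val; pos++)
			;
	for (j = *n; j > pos; j--)
		sorted[j] = sorted[j - 1];
	sorted[pos] = val;
	(*n)++;
}
#endif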
static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}
	if (valid == 0) {
		__set_bit_le(nat_index, nm_i->empty_nat_bits);
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_index, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
}
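/*
 * Illustrative sketch, not part of f2fs: __update_nat_bits() classifies a
 * NAT block as full (every entry allocated), empty (none allocated), or
 * neither; block 0 counts its reserved entry 0 as always valid. A userspace
 * model of the counting, with 0 standing in for NULL_ADDR:
 */
#if 0
enum blk_state { BLK_EMPTY, BLK_PARTIAL, BLK_FULL };

static enum blk_state classify(const unsigned int *block_addr,
			       int nr_entries, int is_block0)
{
	int valid = is_block0 ? 1 : 0;	/* reserved entry of block 0 */
	int i;

	for (i = is_block0 ? 1 : 0; i < nr_entries; i++)
		if (block_addr[i] != 0)
			valid++;

	if (valid == 0)
		return BLK_EMPTY;
	return valid == nr_entries ? BLK_FULL : BLK_PARTIAL;
}
#endif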
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		__update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}
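/*
 * Illustrative sketch, not part of f2fs: the to_journal choice above.
 * Flushing into the hot-data summary journal is cheap but is only taken
 * when the journal has room and nat_bits are not being maintained, since
 * nat_bits need the NAT block itself rewritten to recount full/empty:
 */
#if 0
static int flush_to_journal(int nat_bits_enabled, int journal_slots_left,
			    int entries_to_flush)
{
	if (nat_bits_enabled)
		return 0;	/* must rewrite the NAT block proper */
	return entries_to_flush <= journal_slots_left;
}
#endif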
/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (enabled_nat_bits(sbi, cpc)) {
		down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	if (!enabled_nat_bits(sbi, NULL))
		return 0;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		disable_nat_bits(sbi, true);
		return 0;
	}

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}
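/*
 * Illustrative sketch, not part of f2fs: init_free_nid_cache() builds a
 * two-level structure, one small bitmap per NAT block instead of a single
 * flat bitmap, keeping every allocation small. Rough memory math with
 * example values (455 is NAT_ENTRY_PER_BLOCK for 4KB blocks):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int nat_blocks = 8192;		/* example value */
	unsigned int per_block = 455;		/* entries per NAT block */
	unsigned int bytes_per_bitmap = (per_block + 7) / 8;

	printf("%u bitmaps x %u bytes = %u bytes\n",
	       nat_blocks, bytes_per_bitmap,
	       nat_blocks * bytes_per_bitmap);
	return 0;
}
#endif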
int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero if cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}
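/*
 * Illustrative sketch, not part of f2fs: f2fs_create_node_manager_caches()
 * uses the common kernel "goto unwind" shape, where each failure label
 * releases exactly what was acquired before it, in reverse order. All
 * helpers here are hypothetical.
 */
#if 0
static int example_setup(void)
{
	if (acquire_a())
		goto fail;
	if (acquire_b())
		goto undo_a;
	if (acquire_c())
		goto undo_b;
	return 0;

undo_b:
	release_b();
undo_a:
	release_a();
fail:
	return -ENOMEM;
}
#endif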
void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}