// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}

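/*
 * Illustrative sketch of the lookup pair above (hypothetical values, not
 * part of the build): the fast path probes the caller-supplied cached
 * entry, and only on a miss do we pay for a full rb-tree walk.
 *
 *	struct rb_entry *re;
 *
 *	re = f2fs_lookup_rb_tree(&et->root, cached_re, 103);
 *	// returns the entry when some re satisfies
 *	//	re->ofs <= 103 && re->ofs + re->len > 103,
 *	// e.g. an entry with ofs == 100 and len == 8; NULL otherwise.
 */
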
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
					struct rb_root_cached *root,
					struct rb_node **parent,
					unsigned long long key, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (key < re->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			*leftmost = false;
		}
	}

	return p;
}

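/*
 * Note on ordering: an equal key descends to the right, so duplicates
 * keep insertion order, and *leftmost is cleared as soon as the walk
 * goes right; callers feed it to rb_insert_color_cached() so the cached
 * leftmost node stays valid.
 */
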
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			p = &(*p)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			p = &(*p)->rb_right;
			*leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return p;
}

/*
 * Look up the rb entry covering @ofs in the rb-tree; if hit, return the
 * entry, otherwise return NULL.
 * @prev_entry: extent before ofs
 * @next_entry: extent after ofs
 * @insert_p: insert point for a new extent at ofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force, bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	if (leftmost)
		*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			pnode = &(*pnode)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			pnode = &(*pnode)->rb_right;
			if (leftmost)
				*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}

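/*
 * Worked example (hypothetical values): with extents (ofs 0, len 4) and
 * (ofs 8, len 4) in the tree, a miss at ofs == 5 returns NULL but fills
 * @prev_entry with (0, 4), @next_entry with (8, 4), and @insert_p /
 * @insert_parent with the empty slot between them, so the caller can
 * insert the new extent without a second walk.
 */
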
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (check_key) {
			if (cur_re->key > next_re->key) {
				f2fs_info(sbi, "inconsistent rbtree, "
					"cur(%llu) next(%llu)",
					cur_re->key, next_re->key);
				return false;
			}
			goto next;
		}

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
				  cur_re->ofs, cur_re->len,
				  next_re->ofs, next_re->len);
			return false;
		}
next:
		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}

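/*
 * Lock ordering: callers of __release_extent_node() hold et->lock for
 * writing, so sbi->extent_lock nests inside the per-tree rwlock; the
 * shrinker, which walks the LRU the other way around, must use
 * write_trylock() on et->lock to stay deadlock-free.
 */
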
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

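/*
 * A tree found in the radix tree here is revived from the zombie list:
 * f2fs_destroy_extent_tree() parks still-populated trees of evicted (but
 * still linked) inodes there instead of freeing them, so re-opening the
 * inode gets its cached extents back for free.
 */
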
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}

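/*
 * Worked example (hypothetical values): with largest == (fofs 100,
 * len 20), i.e. blocks [100, 120), an update of (fofs 110, len 5)
 * overlaps (110 < 120 && 115 > 100) and zaps the cached largest extent,
 * while an update of (fofs 120, len 5) touches no cached block and
 * leaves it alone.
 */
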
/* the inode page may be marked dirty here, if the on-disk extent is dropped */
static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		return;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
}

void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
{
	__f2fs_init_extent_tree(inode, ipage);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);
}

static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

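/*
 * Hit accounting above is tiered, cheapest first: the per-tree largest
 * extent (stat_inc_largest_node_hit), the node cached by the previous
 * lookup (stat_inc_cached_node_hit), then a full rb-tree walk
 * (stat_inc_rbtree_node_hit); stat_inc_total_hit counts every lookup and
 * serves as the denominator for the hit ratio.
 */
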
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}

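/*
 * Worked example (hypothetical values): merging ei == (fofs 4, len 4)
 * between prev_ex == (0, 4) and next_ex == (8, 4) first grows prev_ex to
 * (0, 8), then front-merges that into next_ex, which becomes (0, 12);
 * prev_ex is released since next_ex now covers its range too.
 */
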
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
						ei->fofs, &leftmost);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}

static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if the original extent is split into zero or two parts, the
		 * extent tree has been altered by deletion or insertion,
		 * therefore invalidate the pointers into the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {

		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

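/*
 * Worked example (hypothetical values, F2FS_MIN_EXTENT_LEN == 64):
 * rewriting (fofs 64, len 16) inside a cached extent (fofs 0, len 192)
 * trims that node to (0, 64), inserts the tail (80, 112) as en1 and,
 * blkaddr permitting, adds a fresh (64, 16) mapping in step 3.  The
 * original extent split into two parts, so parts != 1 and the cached
 * insert_p/insert_parent hints are dropped because the tree changed.
 */
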
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

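/*
 * Shrinker shape: step 1 reclaims whole zombie trees, step 2 evicts
 * individual nodes from the global LRU (sbi->extent_list) until
 * nr_shrink is met; the tree mutex and the per-tree rwlocks are only
 * trylocked, so the shrinker backs off rather than blocking foreground
 * extent updates.
 */
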
unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	bool updated = false;

	if (!f2fs_may_extent_tree(inode))
		return;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (et->largest.len) {
		et->largest.len = 0;
		updated = true;
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete the extent tree entry in the radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}

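/*
 * The offset math above maps a dnode slot back to a file offset: the
 * first block index covered by dn->node_page plus dn->ofs_in_node is the
 * logical block just updated, recorded as a one-block extent and left to
 * the tree to merge.  A NEW_ADDR (preallocated, not yet written) block is
 * passed down as NULL_ADDR, which invalidates the range instead of
 * caching it.
 */
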
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}