// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}
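
/*
 * Illustrative sketch (not in the original file): callers thread the last
 * hit back in as @cached_re (f2fs keeps it in et->cached_en), so repeated
 * lookups inside the same extent skip the rbtree walk entirely.
 */
static inline struct rb_entry *__lookup_with_hint_example(
				struct rb_root_cached *root,
				struct rb_entry **hint, unsigned int ofs)
{
	struct rb_entry *re = f2fs_lookup_rb_tree(root, *hint, ofs);

	if (re)
		*hint = re;	/* remember the hit for the next lookup */
	return re;
}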

struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			p = &(*p)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			p = &(*p)->rb_right;
			*leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return p;
}

/*
 * Look up the rb entry covering @ofs in the rb-tree; if hit, return the
 * entry, otherwise return NULL.
 * @prev_ex: extent before ofs
 * @next_ex: extent after ofs
 * @insert_p: insert point for a new extent at ofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force, bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	if (leftmost)
		*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs) {
			pnode = &(*pnode)->rb_left;
		} else if (ofs >= re->ofs + re->len) {
			pnode = &(*pnode)->rb_right;
			if (leftmost)
				*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}
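
/*
 * Illustrative sketch (not in the original file): on a miss, the lookup
 * above hands back @insert_p/@insert_parent, so a new node can be linked
 * without a second tree walk. This is only valid while the tree's lock is
 * held for writing, since the tree must not change in between.
 */
static inline void __lookup_then_insert_example(struct rb_root_cached *root,
				struct rb_entry *new_re)
{
	struct rb_entry *prev, *next;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	if (f2fs_lookup_rb_tree_ret(root, NULL, new_re->ofs,
			&prev, &next, &insert_p, &insert_parent,
			false, &leftmost))
		return;		/* @ofs already covered by an existing entry */

	if (!insert_p)		/* empty tree: no insertion point was filled */
		return;

	rb_link_node(&new_re->rb_node, insert_parent, insert_p);
	rb_insert_color_cached(&new_re->rb_node, root, leftmost);
}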

bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}

		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}
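
/*
 * Illustrative sketch (not in the original file): a typical caller drops
 * one node under the tree's write lock. The LRU unlink happens first,
 * under sbi->extent_lock, so the shrinker never observes a node that is
 * still on the global list but already erased from its tree.
 */
static inline void __release_one_node_example(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	write_lock(&et->lock);
	__release_extent_node(sbi, et, en);
	write_unlock(&et->lock);
}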

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}
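
/*
 * Worked example for the overlap test above (illustrative, not in the
 * original file): with largest = {fofs = 100, len = 50}, i.e. [100, 150),
 * an update of fofs = 120, len = 10, i.e. [120, 130), satisfies
 * 120 < 100 + 50 && 120 + 10 > 100, so the cached largest extent is
 * invalidated; a disjoint update such as [160, 170) leaves it intact.
 */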

/* return true if inode page is changed */
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			i_ext->len = 0;
			return true;
		}
		return false;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return false;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	return false;
}

bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	bool ret = __f2fs_init_extent_tree(inode, i_ext);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);

	return ret;
}

static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}
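
/*
 * Illustrative sketch (not in the original file): the lookup above tries
 * three sources in decreasing order of cheapness -- the per-tree largest
 * extent, the cached node, then a full rbtree walk. A hypothetical caller
 * maps a file offset through the public wrapper (declared in f2fs.h and
 * defined below):
 */
static inline bool __map_block_example(struct inode *inode, pgoff_t index,
					block_t *blkaddr)
{
	struct extent_info ei;

	if (!f2fs_lookup_extent_cache(inode, index, &ei))
		return false;	/* miss: fall back to reading the dnode */

	/* translate the file offset to a block address inside the extent */
	*blkaddr = ei.blk + (index - ei.fofs);
	return true;
}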

static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);
		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}
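
/*
 * Worked example for the merge above (illustrative, not in the original
 * file): with prev_ex = {fofs 0, blk 100, len 10} and
 * next_ex = {fofs 11, blk 111, len 5}, inserting ei = {fofs 10, blk 110,
 * len 1} first back-merges into prev_ex (len becomes 11), which then
 * front-merges into next_ex, yielding {fofs 0, blk 100, len 16}; prev_ex
 * is released as a duplicate.
 */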

static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
						ei->fofs, &leftmost);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}

static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false,
					&leftmost);

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if the original extent is split into zero or two parts, the
		 * extent tree has been altered by deletion or insertion,
		 * therefore invalidate pointers regarding the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
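
/*
 * Worked example for the three steps above (illustrative, not in the
 * original file): suppose one node caches {fofs 0, blk 1000, len 200} and
 * ten blocks at file offset 80 are rewritten to blkaddr 5000. Step 2
 * trims the node to {0, 1000, 80} and inserts the tail {90, 1090, 110}
 * (both remnants are kept only because each is at least
 * F2FS_MIN_EXTENT_LEN blocks long); step 3 inserts {80, 5000, 10} between
 * them, since the discontiguous block addresses prevent any merge. A
 * caller would express that rewrite through the wrapper defined below:
 */
static inline void __rewrite_range_example(struct dnode_of_data *dn)
{
	/* hypothetical values matching the walkthrough above */
	f2fs_update_extent_cache_range(dn, 80, 5000, 10);
}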

unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent trees */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}
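
/*
 * Illustrative sketch (not in the original file): the f2fs shrinker (see
 * shrinker.c) calls this with a reclaim budget; zombie trees left behind
 * by evicted inodes go first, then the coldest nodes at the head of
 * sbi->extent_list. Both locks are only trylock'ed, so reclaim never
 * blocks behind a writer.
 */
static inline unsigned int __reclaim_extents_example(struct f2fs_sb_info *sbi)
{
	/* hypothetical budget; the real caller derives it from nr_to_scan */
	return f2fs_shrink_extent_tree(sbi, 128);
}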

unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	bool updated = false;

	if (!f2fs_may_extent_tree(inode))
		return;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (et->largest.len) {
		et->largest.len = 0;
		updated = true;
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
				atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
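
/*
 * Note (illustrative, not in the original file): the file offset is
 * recovered from the dnode -- f2fs_start_bidx_of_node() yields the file
 * block index of the node page's first slot and dn->ofs_in_node selects
 * the slot just written, so the single-block update lands at exactly that
 * offset. A NEW_ADDR block (preallocated but unwritten) is passed down as
 * NULL_ADDR, which only invalidates the range instead of caching it.
 */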

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}
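
/*
 * Illustrative sketch (not in the original file): a hypothetical module
 * init pairs the two slab caches with the teardown below, unwinding in
 * reverse order on a later failure, mirroring what init_f2fs_fs() does.
 */
static int __init __cache_setup_example(void)
{
	int err = f2fs_create_extent_cache();

	if (err)
		return err;	/* neither cache exists; nothing to undo */

	/* ... register the filesystem etc.; on a later failure, call */
	/* f2fs_destroy_extent_cache() before returning the error. */
	return 0;
}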

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}