// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
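/*
 * The extent cache maps runs of contiguous file offsets to runs of
 * contiguous block addresses, so repeated lookups can be answered
 * without re-walking the inode's node pages.  Each inode owns an
 * extent_tree (an rbtree of extent_node entries, indexed by a per-sb
 * radix tree), and every node is also strung on a global LRU list so
 * the shrinker can reclaim entries under memory pressure.
 */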
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
                                                        unsigned int ofs)
{
        if (cached_re) {
                if (cached_re->ofs <= ofs &&
                                cached_re->ofs + cached_re->len > ofs) {
                        return cached_re;
                }
        }
        return NULL;
}
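/*
 * Slow path: an ordinary binary search over the rbtree, treating each
 * entry as the half-open range [re->ofs, re->ofs + re->len).
 */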
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
                                                        unsigned int ofs)
{
        struct rb_node *node = root->rb_root.rb_node;
        struct rb_entry *re;

        while (node) {
                re = rb_entry(node, struct rb_entry, rb_node);

                if (ofs < re->ofs)
                        node = node->rb_left;
                else if (ofs >= re->ofs + re->len)
                        node = node->rb_right;
                else
                        return re;
        }
        return NULL;
}
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
                                struct rb_entry *cached_re, unsigned int ofs)
{
        struct rb_entry *re;

        re = __lookup_rb_tree_fast(cached_re, ofs);
        if (!re)
                return __lookup_rb_tree_slow(root, ofs);

        return re;
}
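/*
 * Find the link pointer and parent under which a new entry at @ofs
 * should be inserted.  @leftmost is cleared whenever the walk descends
 * right, which lets the caller maintain the rb_root_cached leftmost
 * hint.
 */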
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
                                struct rb_root_cached *root,
                                struct rb_node **parent,
                                unsigned int ofs, bool *leftmost)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_entry *re;

        while (*p) {
                *parent = *p;
                re = rb_entry(*parent, struct rb_entry, rb_node);

                if (ofs < re->ofs) {
                        p = &(*p)->rb_left;
                } else if (ofs >= re->ofs + re->len) {
                        p = &(*p)->rb_right;
                        *leftmost = false;
                } else {
                        f2fs_bug_on(sbi, 1);
                }
        }

        return p;
}
/*
 * lookup rb entry in position of @ofs in rb-tree,
 * if hit, return the entry, otherwise, return NULL
 * @prev_ex: extent before ofs
 * @next_ex: extent after ofs
 * @insert_p: insert point for new extent at ofs
 * in order to simplify the insertion after.
 * tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
                                struct rb_entry *cached_re,
                                unsigned int ofs,
                                struct rb_entry **prev_entry,
                                struct rb_entry **next_entry,
                                struct rb_node ***insert_p,
                                struct rb_node **insert_parent,
                                bool force, bool *leftmost)
{
        struct rb_node **pnode = &root->rb_root.rb_node;
        struct rb_node *parent = NULL, *tmp_node;
        struct rb_entry *re = cached_re;

        *insert_p = NULL;
        *insert_parent = NULL;
        *prev_entry = NULL;
        *next_entry = NULL;

        if (RB_EMPTY_ROOT(&root->rb_root))
                return NULL;

        if (re) {
                if (re->ofs <= ofs && re->ofs + re->len > ofs)
                        goto lookup_neighbors;
        }

        if (leftmost)
                *leftmost = true;

        while (*pnode) {
                parent = *pnode;
                re = rb_entry(*pnode, struct rb_entry, rb_node);

                if (ofs < re->ofs) {
                        pnode = &(*pnode)->rb_left;
                } else if (ofs >= re->ofs + re->len) {
                        pnode = &(*pnode)->rb_right;
                        if (leftmost)
                                *leftmost = false;
                } else {
                        goto lookup_neighbors;
                }
        }

        *insert_p = pnode;
        *insert_parent = parent;

        re = rb_entry(parent, struct rb_entry, rb_node);
        tmp_node = parent;
        if (parent && ofs > re->ofs)
                tmp_node = rb_next(parent);
        *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

        tmp_node = parent;
        if (parent && ofs < re->ofs)
                tmp_node = rb_prev(parent);
        *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
        return NULL;

lookup_neighbors:
        if (ofs == re->ofs || force) {
                /* lookup prev node for merging backward later */
                tmp_node = rb_prev(&re->rb_node);
                *prev_entry = rb_entry_safe(tmp_node,
                                        struct rb_entry, rb_node);
        }
        if (ofs == re->ofs + re->len - 1 || force) {
                /* lookup next node for merging frontward later */
                tmp_node = rb_next(&re->rb_node);
                *next_entry = rb_entry_safe(tmp_node,
                                        struct rb_entry, rb_node);
        }
        return re;
}
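/*
 * Debug-only sanity check: walk the entries in order and verify that
 * no extent overlaps its in-order successor.  Compiled away unless
 * CONFIG_F2FS_CHECK_FS is set.
 */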
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
                                                struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
        struct rb_node *cur = rb_first_cached(root), *next;
        struct rb_entry *cur_re, *next_re;

        if (!cur)
                return true;

        while (cur) {
                next = rb_next(cur);
                if (!next)
                        return true;

                cur_re = rb_entry(cur, struct rb_entry, rb_node);
                next_re = rb_entry(next, struct rb_entry, rb_node);

                if (cur_re->ofs + cur_re->len > next_re->ofs) {
                        f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
                                "cur(%u, %u) next(%u, %u)",
                                cur_re->ofs, cur_re->len,
                                next_re->ofs, next_re->len);
                        return false;
                }

                cur = next;
        }
#endif
        return true;
}
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
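/*
 * Callers hold et->lock for writing (a non-sleeping rwlock), so the
 * node is allocated with GFP_ATOMIC and callers must tolerate a NULL
 * return.
 */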
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
                                struct extent_tree *et, struct extent_info *ei,
                                struct rb_node *parent, struct rb_node **p,
                                bool leftmost)
{
        struct extent_node *en;

        en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
        if (!en)
                return NULL;

        en->ei = *ei;
        INIT_LIST_HEAD(&en->list);
        en->et = et;

        rb_link_node(&en->rb_node, parent, p);
        rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
        atomic_inc(&et->node_cnt);
        atomic_inc(&sbi->total_ext_node);
        return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
                                struct extent_tree *et, struct extent_node *en)
{
        rb_erase_cached(&en->rb_node, &et->root);
        atomic_dec(&et->node_cnt);
        atomic_dec(&sbi->total_ext_node);

        if (et->cached_en == en)
                et->cached_en = NULL;
        kmem_cache_free(extent_node_slab, en);
}
/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
                        struct extent_tree *et, struct extent_node *en)
{
        spin_lock(&sbi->extent_lock);
        f2fs_bug_on(sbi, list_empty(&en->list));
        list_del_init(&en->list);
        spin_unlock(&sbi->extent_lock);

        __detach_extent_node(sbi, et, en);
}
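/*
 * Look up the inode's extent_tree in the per-sb radix tree, allocating
 * and initializing it on first use.  A tree found on the zombie list
 * (left over from a previously evicted in-memory inode) is revived
 * instead of being reallocated.
 */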
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct extent_tree *et;
        nid_t ino = inode->i_ino;

        mutex_lock(&sbi->extent_tree_lock);
        et = radix_tree_lookup(&sbi->extent_tree_root, ino);
        if (!et) {
                et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
                f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
                memset(et, 0, sizeof(struct extent_tree));
                et->ino = ino;
                et->root = RB_ROOT_CACHED;
                et->cached_en = NULL;
                rwlock_init(&et->lock);
                INIT_LIST_HEAD(&et->list);
                atomic_set(&et->node_cnt, 0);
                atomic_inc(&sbi->total_ext_tree);
        } else {
                atomic_dec(&sbi->total_zombie_tree);
                list_del_init(&et->list);
        }
        mutex_unlock(&sbi->extent_tree_lock);

        /* never died until evict_inode */
        F2FS_I(inode)->extent_tree = et;

        return et;
}
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
                                struct extent_tree *et, struct extent_info *ei)
{
        struct rb_node **p = &et->root.rb_root.rb_node;
        struct extent_node *en;

        en = __attach_extent_node(sbi, et, ei, NULL, p, true);
        if (!en)
                return NULL;

        et->largest = en->ei;
        et->cached_en = en;
        return en;
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
                                        struct extent_tree *et)
{
        struct rb_node *node, *next;
        struct extent_node *en;
        unsigned int count = atomic_read(&et->node_cnt);

        node = rb_first_cached(&et->root);
        while (node) {
                next = rb_next(node);
                en = rb_entry(node, struct extent_node, rb_node);
                __release_extent_node(sbi, et, en);
                node = next;
        }

        return count - atomic_read(&et->node_cnt);
}
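/*
 * Invalidate et->largest if it intersects [fofs, fofs + len).  For
 * example, largest = {fofs = 10, len = 5} covers [10, 15); an update
 * at fofs = 12, len = 8 intersects it because 12 < 10 + 5 and
 * 12 + 8 > 10.
 */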
static void __drop_largest_extent(struct extent_tree *et,
                                        pgoff_t fofs, unsigned int len)
{
        if (fofs < et->largest.fofs + et->largest.len &&
                        fofs + len > et->largest.fofs) {
                et->largest.len = 0;
                et->largest_updated = true;
        }
}
/* return true, if inode page is changed */
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct extent_tree *et;
        struct extent_node *en;
        struct extent_info ei;

        if (!f2fs_may_extent_tree(inode)) {
                /* drop largest extent */
                if (i_ext && i_ext->len) {
                        i_ext->len = 0;
                        return true;
                }
                return false;
        }

        et = __grab_extent_tree(inode);

        if (!i_ext || !i_ext->len)
                return false;

        get_extent_info(&ei, i_ext);

        write_lock(&et->lock);
        if (atomic_read(&et->node_cnt))
                goto out;

        en = __init_extent_tree(sbi, et, &ei);
        if (en) {
                spin_lock(&sbi->extent_lock);
                list_add_tail(&en->list, &sbi->extent_list);
                spin_unlock(&sbi->extent_lock);
        }
out:
        write_unlock(&et->lock);
        return false;
}
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
        bool ret = __f2fs_init_extent_tree(inode, i_ext);

        if (!F2FS_I(inode)->extent_tree)
                set_inode_flag(inode, FI_NO_EXTENT);

        return ret;
}
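/*
 * Lookup order under the read lock: the inode's largest extent first,
 * then the node cached from the last hit, then a full rbtree search.
 * A hit still on the global LRU list is moved to its tail under
 * extent_lock.
 */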
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
                                                        struct extent_info *ei)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct extent_tree *et = F2FS_I(inode)->extent_tree;
        struct extent_node *en;
        bool ret = false;

        f2fs_bug_on(sbi, !et);

        trace_f2fs_lookup_extent_tree_start(inode, pgofs);

        read_lock(&et->lock);

        if (et->largest.fofs <= pgofs &&
                        et->largest.fofs + et->largest.len > pgofs) {
                *ei = et->largest;
                ret = true;
                stat_inc_largest_node_hit(sbi);
                goto out;
        }

        en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
                                (struct rb_entry *)et->cached_en, pgofs);
        if (!en)
                goto out;

        if (en == et->cached_en)
                stat_inc_cached_node_hit(sbi);
        else
                stat_inc_rbtree_node_hit(sbi);

        *ei = en->ei;
        spin_lock(&sbi->extent_lock);
        if (!list_empty(&en->list)) {
                list_move_tail(&en->list, &sbi->extent_list);
                et->cached_en = en;
        }
        spin_unlock(&sbi->extent_lock);
        ret = true;
out:
        stat_inc_total_hit(sbi);
        read_unlock(&et->lock);

        trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
        return ret;
}
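/*
 * Try to merge @ei with its neighbors: a back merge extends @prev_ex
 * forward, a front merge extends @next_ex backward.  If both apply,
 * the combined range ends up in @next_ex and @prev_ex is released.
 */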
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
                                struct extent_tree *et, struct extent_info *ei,
                                struct extent_node *prev_ex,
                                struct extent_node *next_ex)
{
        struct extent_node *en = NULL;

        if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
                prev_ex->ei.len += ei->len;
                ei = &prev_ex->ei;
                en = prev_ex;
        }

        if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
                next_ex->ei.fofs = ei->fofs;
                next_ex->ei.blk = ei->blk;
                next_ex->ei.len += ei->len;
                if (en)
                        __release_extent_node(sbi, et, prev_ex);

                en = next_ex;
        }

        if (!en)
                return NULL;

        __try_update_largest_extent(et, en);

        spin_lock(&sbi->extent_lock);
        if (!list_empty(&en->list)) {
                list_move_tail(&en->list, &sbi->extent_list);
                et->cached_en = en;
        }
        spin_unlock(&sbi->extent_lock);
        return en;
}
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
                                struct extent_tree *et, struct extent_info *ei,
                                struct rb_node **insert_p,
                                struct rb_node *insert_parent,
                                bool leftmost)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct extent_node *en = NULL;

        if (insert_p && insert_parent) {
                parent = insert_parent;
                p = insert_p;
                goto do_insert;
        }

        leftmost = true;

        p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
                                                ei->fofs, &leftmost);
do_insert:
        en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
        if (!en)
                return NULL;

        __try_update_largest_extent(et, en);

        /* update in global extent list */
        spin_lock(&sbi->extent_lock);
        list_add_tail(&en->list, &sbi->extent_list);
        et->cached_en = en;
        spin_unlock(&sbi->extent_lock);
        return en;
}
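/*
 * Update the cache for [fofs, fofs + len) in three steps: drop the
 * largest extent if it overlaps the range, split or remove every node
 * inside the range, then insert (or merge in) the new mapping when
 * @blkaddr is valid.  Split leftovers shorter than F2FS_MIN_EXTENT_LEN
 * are simply dropped.
 */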
static void f2fs_update_extent_tree_range(struct inode *inode,
                                pgoff_t fofs, block_t blkaddr, unsigned int len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct extent_tree *et = F2FS_I(inode)->extent_tree;
        struct extent_node *en = NULL, *en1 = NULL;
        struct extent_node *prev_en = NULL, *next_en = NULL;
        struct extent_info ei, dei, prev;
        struct rb_node **insert_p = NULL, *insert_parent = NULL;
        unsigned int end = fofs + len;
        unsigned int pos = (unsigned int)fofs;
        bool updated = false;
        bool leftmost = false;

        if (!et)
                return;

        trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

        write_lock(&et->lock);

        if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
                write_unlock(&et->lock);
                return;
        }

        prev = et->largest;
        dei.len = 0;

        /*
         * drop largest extent before lookup, in case it's already
         * been shrunk from extent tree
         */
        __drop_largest_extent(et, fofs, len);

        /* 1. lookup first extent node in range [fofs, fofs + len - 1] */
        en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
                                        (struct rb_entry *)et->cached_en, fofs,
                                        (struct rb_entry **)&prev_en,
                                        (struct rb_entry **)&next_en,
                                        &insert_p, &insert_parent, false,
                                        &leftmost);

        /* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
        while (en && en->ei.fofs < end) {
                unsigned int org_end;
                int parts = 0;  /* # of parts current extent split into */

                next_en = en1 = NULL;

                dei = en->ei;
                org_end = dei.fofs + dei.len;
                f2fs_bug_on(sbi, pos >= org_end);

                if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
                        en->ei.len = pos - en->ei.fofs;
                        prev_en = en;
                        parts = 1;
                }

                if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
                        if (parts) {
                                set_extent_info(&ei, end,
                                                end - dei.fofs + dei.blk,
                                                org_end - end);
                                en1 = __insert_extent_tree(sbi, et, &ei,
                                                        NULL, NULL, true);
                                next_en = en1;
                        } else {
                                en->ei.fofs = end;
                                en->ei.blk += end - dei.fofs;
                                en->ei.len -= end - dei.fofs;
                                next_en = en;
                        }
                        parts++;
                }

                if (!next_en) {
                        struct rb_node *node = rb_next(&en->rb_node);

                        next_en = rb_entry_safe(node, struct extent_node,
                                                rb_node);
                }

                if (parts)
                        __try_update_largest_extent(et, en);
                else
                        __release_extent_node(sbi, et, en);

                /*
                 * if original extent is split into zero or two parts, extent
                 * tree has been altered by deletion or insertion, therefore
                 * invalidate pointers regarding the tree.
                 */
                if (parts != 1) {
                        insert_p = NULL;
                        insert_parent = NULL;
                }
                en = next_en;
        }

        /* 3. update extent in extent cache */
        if (blkaddr) {
                set_extent_info(&ei, fofs, blkaddr, len);
                if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
                        __insert_extent_tree(sbi, et, &ei,
                                        insert_p, insert_parent, leftmost);

                /* give up extent_cache, if split and small updates happen */
                if (dei.len >= 1 &&
                                prev.len < F2FS_MIN_EXTENT_LEN &&
                                et->largest.len < F2FS_MIN_EXTENT_LEN) {
                        et->largest.len = 0;
                        et->largest_updated = true;
                        set_inode_flag(inode, FI_NO_EXTENT);
                }
        }

        if (is_inode_flag_set(inode, FI_NO_EXTENT))
                __free_extent_tree(sbi, et);

        if (et->largest_updated) {
                et->largest_updated = false;
                updated = true;
        }

        write_unlock(&et->lock);

        if (updated)
                f2fs_mark_inode_dirty_sync(inode, true);
}
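/*
 * Shrinker entry point: first tear down zombie trees (inodes already
 * evicted), then reclaim extent nodes from the head of the global LRU
 * list.  Both phases only trylock, so reclaim never blocks on the
 * extent cache locks.
 */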
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
        struct extent_tree *et, *next;
        struct extent_node *en;
        unsigned int node_cnt = 0, tree_cnt = 0;
        int remained;

        if (!test_opt(sbi, EXTENT_CACHE))
                return 0;

        if (!atomic_read(&sbi->total_zombie_tree))
                goto free_node;

        if (!mutex_trylock(&sbi->extent_tree_lock))
                goto out;

        /* 1. remove unreferenced extent tree */
        list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
                if (atomic_read(&et->node_cnt)) {
                        write_lock(&et->lock);
                        node_cnt += __free_extent_tree(sbi, et);
                        write_unlock(&et->lock);
                }
                f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
                list_del_init(&et->list);
                radix_tree_delete(&sbi->extent_tree_root, et->ino);
                kmem_cache_free(extent_tree_slab, et);
                atomic_dec(&sbi->total_ext_tree);
                atomic_dec(&sbi->total_zombie_tree);
                tree_cnt++;

                if (node_cnt + tree_cnt >= nr_shrink)
                        goto unlock_out;
                cond_resched();
        }
        mutex_unlock(&sbi->extent_tree_lock);

free_node:
        /* 2. remove LRU extent entries */
        if (!mutex_trylock(&sbi->extent_tree_lock))
                goto out;

        remained = nr_shrink - (node_cnt + tree_cnt);

        spin_lock(&sbi->extent_lock);
        for (; remained > 0; remained--) {
                if (list_empty(&sbi->extent_list))
                        break;
                en = list_first_entry(&sbi->extent_list,
                                        struct extent_node, list);
                et = en->et;
                if (!write_trylock(&et->lock)) {
                        /* refresh this extent node's position in extent list */
                        list_move_tail(&en->list, &sbi->extent_list);
                        continue;
                }

                list_del_init(&en->list);
                spin_unlock(&sbi->extent_lock);

                __detach_extent_node(sbi, et, en);

                write_unlock(&et->lock);
                node_cnt++;
                spin_lock(&sbi->extent_lock);
        }
        spin_unlock(&sbi->extent_lock);

unlock_out:
        mutex_unlock(&sbi->extent_tree_lock);
out:
        trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

        return node_cnt + tree_cnt;
}
unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct extent_tree *et = F2FS_I(inode)->extent_tree;
        unsigned int node_cnt = 0;

        if (!et || !atomic_read(&et->node_cnt))
                return 0;

        write_lock(&et->lock);
        node_cnt = __free_extent_tree(sbi, et);
        write_unlock(&et->lock);

        return node_cnt;
}
void f2fs_drop_extent_tree(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct extent_tree *et = F2FS_I(inode)->extent_tree;
        bool updated = false;

        if (!f2fs_may_extent_tree(inode))
                return;

        set_inode_flag(inode, FI_NO_EXTENT);

        write_lock(&et->lock);
        __free_extent_tree(sbi, et);
        if (et->largest.len) {
                et->largest.len = 0;
                updated = true;
        }
        write_unlock(&et->lock);
        if (updated)
                f2fs_mark_inode_dirty_sync(inode, true);
}
void f2fs_destroy_extent_tree(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct extent_tree *et = F2FS_I(inode)->extent_tree;
        unsigned int node_cnt = 0;

        if (!et)
                return;

        if (inode->i_nlink && !is_bad_inode(inode) &&
                                        atomic_read(&et->node_cnt)) {
                mutex_lock(&sbi->extent_tree_lock);
                list_add_tail(&et->list, &sbi->zombie_list);
                atomic_inc(&sbi->total_zombie_tree);
                mutex_unlock(&sbi->extent_tree_lock);
                return;
        }

        /* free all extent info belong to this extent tree */
        node_cnt = f2fs_destroy_extent_node(inode);

        /* delete extent tree entry in radix tree */
        mutex_lock(&sbi->extent_tree_lock);
        f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
        radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
        kmem_cache_free(extent_tree_slab, et);
        atomic_dec(&sbi->total_ext_tree);
        mutex_unlock(&sbi->extent_tree_lock);

        F2FS_I(inode)->extent_tree = NULL;

        trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
                                        struct extent_info *ei)
{
        if (!f2fs_may_extent_tree(inode))
                return false;

        return f2fs_lookup_extent_tree(inode, pgofs, ei);
}
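/*
 * Single-block update helper.  NEW_ADDR (block preallocated but not
 * yet assigned an on-disk address) is translated to NULL_ADDR so the
 * block is invalidated in the cache rather than cached with an address
 * that does not exist on disk yet.
 */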
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
        pgoff_t fofs;
        block_t blkaddr;

        if (!f2fs_may_extent_tree(dn->inode))
                return;

        if (dn->data_blkaddr == NEW_ADDR)
                blkaddr = NULL_ADDR;
        else
                blkaddr = dn->data_blkaddr;

        fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
                                                        dn->ofs_in_node;
        f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
                                pgoff_t fofs, block_t blkaddr, unsigned int len)
{
        if (!f2fs_may_extent_tree(dn->inode))
                return;

        f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
        INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
        mutex_init(&sbi->extent_tree_lock);
        INIT_LIST_HEAD(&sbi->extent_list);
        spin_lock_init(&sbi->extent_lock);
        atomic_set(&sbi->total_ext_tree, 0);
        INIT_LIST_HEAD(&sbi->zombie_list);
        atomic_set(&sbi->total_zombie_tree, 0);
        atomic_set(&sbi->total_ext_node, 0);
}
int __init f2fs_create_extent_cache(void)
{
        extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
                        sizeof(struct extent_tree));
        if (!extent_tree_slab)
                return -ENOMEM;
        extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
                        sizeof(struct extent_node));
        if (!extent_node_slab) {
                kmem_cache_destroy(extent_tree_slab);
                return -ENOMEM;
        }
        return 0;
}
void f2fs_destroy_extent_cache(void)
{
        kmem_cache_destroy(extent_node_slab);
        kmem_cache_destroy(extent_tree_slab);
}