// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * block_age-based extent cache added by:
 * Copyright (c) 2022 xiaomi Co., Ltd.
 *             http://www.xiaomi.com/
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
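
/*
 * Two per-inode extent caches are maintained here: EX_READ caches
 * file-offset-to-block-address mappings so reads can skip node page
 * lookups, and EX_BLOCK_AGE records when a region was last allocated,
 * which appears to feed the hot/warm data age thresholds set up in
 * f2fs_init_extent_cache_info(). Both share the rb-tree based
 * extent_tree/extent_node machinery below.
 */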
bool sanity_check_extent_cache(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_info ei;
	int devi;

	get_read_extent_info(&ei, i_ext);

	if (!ei.len)
		return true;

	if (!f2fs_is_valid_blkaddr(sbi, ei.blk, DATA_GENERIC_ENHANCE) ||
	    !f2fs_is_valid_blkaddr(sbi, ei.blk + ei.len - 1,
					DATA_GENERIC_ENHANCE)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
			  __func__, inode->i_ino,
			  ei.blk, ei.fofs, ei.len);
		return false;
	}

	if (!IS_DEVICE_ALIASING(inode))
		return true;

	for (devi = 0; devi < sbi->s_ndevs; devi++) {
		if (FDEV(devi).start_blk != ei.blk ||
				FDEV(devi).end_blk != ei.blk + ei.len - 1)
			continue;

		if (devi == 0) {
			f2fs_warn(sbi,
				"%s: inode (ino=%lx) is an alias of meta device",
				__func__, inode->i_ino);
			return false;
		}

		if (bdev_is_zoned(FDEV(devi).bdev)) {
			f2fs_warn(sbi,
				"%s: device alias inode (ino=%lx)'s extent info "
				"[%u, %u, %u] maps to zoned block device",
				__func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
			return false;
		}
		return true;
	}

	f2fs_warn(sbi, "%s: device alias inode (ino=%lx)'s extent info "
		"[%u, %u, %u] is inconsistent w/ any devices",
		__func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
	return false;
}
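
/*
 * Fill @ei for the given extent type: EX_READ extents carry a start
 * block address (and, with compression enabled, a cluster length that
 * is reset unless @keep_clen asks to preserve it), while EX_BLOCK_AGE
 * extents carry an age and the allocation counter sampled at update time.
 */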
static void __set_extent_info(struct extent_info *ei,
				unsigned int fofs, unsigned int len,
				block_t blk, bool keep_clen,
				unsigned long age, unsigned long last_blocks,
				enum extent_type type)
{
	ei->fofs = fofs;
	ei->len = len;

	if (type == EX_READ) {
		ei->blk = blk;
		if (keep_clen)
			return;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		ei->c_len = 0;
#endif
	} else if (type == EX_BLOCK_AGE) {
		ei->age = age;
		ei->last_blocks = last_blocks;
	}
}
static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (type == EX_READ)
		return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
			S_ISREG(inode->i_mode);
	if (type == EX_BLOCK_AGE)
		return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
	return false;
}
static bool __may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (IS_DEVICE_ALIASING(inode) && type == EX_READ)
		return true;

	/*
	 * for recovered files during mount do not create extents
	 * if shrinker is not registered.
	 */
	if (list_empty(&F2FS_I_SB(inode)->s_list))
		return false;

	if (!__init_may_extent_tree(inode, type))
		return false;

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT))
			return false;
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
				 !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
			return false;
	} else if (type == EX_BLOCK_AGE) {
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
			return false;
		if (file_is_cold(inode))
			return false;
	}

	return true;
}
static void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (et->type != EX_READ)
		return;
	if (en->ei.len <= et->largest.len)
		return;

	et->largest = en->ei;
	et->largest_updated = true;
}
static bool __is_extent_mergeable(struct extent_info *back,
		struct extent_info *front, enum extent_type type)
{
	if (type == EX_READ) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (back->c_len && back->len != back->c_len)
			return false;
		if (front->c_len && front->len != front->c_len)
			return false;
#endif
		return (back->fofs + back->len == front->fofs &&
				back->blk + back->len == front->blk);
	} else if (type == EX_BLOCK_AGE) {
		return (back->fofs + back->len == front->fofs &&
			abs(back->age - front->age) <= SAME_AGE_REGION &&
			abs(back->last_blocks - front->last_blocks) <=
							SAME_AGE_REGION);
	}
	return false;
}
static bool __is_back_mergeable(struct extent_info *cur,
		struct extent_info *back, enum extent_type type)
{
	return __is_extent_mergeable(back, cur, type);
}

static bool __is_front_mergeable(struct extent_info *cur,
		struct extent_info *front, enum extent_type type)
{
	return __is_extent_mergeable(cur, front, type);
}
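
/*
 * Look up the extent node covering @fofs. The per-tree cached entry is
 * checked first as a fast path; otherwise fall back to a standard
 * rb-tree descent keyed on the half-open range [fofs, fofs + len).
 */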
static struct extent_node *__lookup_extent_node(struct rb_root_cached *root,
			struct extent_node *cached_en, unsigned int fofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct extent_node *en;

	/* check a cached entry */
	if (cached_en && cached_en->ei.fofs <= fofs &&
			cached_en->ei.fofs + cached_en->ei.len > fofs)
		return cached_en;

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}
/*
 * Look up the rb entry at position @fofs in the rb-tree;
 * if hit, return the entry, otherwise, return NULL.
 * @prev_ex: extent before fofs
 * @next_ex: extent after fofs
 * @insert_p: insert point for a new extent at fofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root,
				struct extent_node *cached_en,
				unsigned int fofs,
				struct extent_node **prev_entry,
				struct extent_node **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs)
		goto lookup_neighbors;

	*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			pnode = &(*pnode)->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			pnode = &(*pnode)->rb_right;
			*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	if (fofs == en->ei.fofs + en->ei.len - 1) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&en->rb_node);
		*next_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	return en;
}
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en;

	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&eti->total_ext_node);
	return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&eti->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}
/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	spin_lock(&eti->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&eti->extent_lock);

	__detach_extent_node(sbi, et, en);
}
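
/*
 * Find the inode's extent tree in the per-type radix tree, allocating
 * and initializing a new one on first use. A tree found on the zombie
 * list is revived rather than reallocated.
 */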
static struct extent_tree *__grab_extent_tree(struct inode *inode,
						enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&eti->extent_tree_lock);
	et = radix_tree_lookup(&eti->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->type = type;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&eti->total_ext_tree);
	} else {
		atomic_dec(&eti->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&eti->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree[type] = et;

	return et;
}
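
/* Free up to @nr_shrink extent nodes from @et, leftmost first. */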
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, unsigned int nr_shrink)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count;

	node = rb_first_cached(&et->root);

	for (count = 0; node && count < nr_shrink; count++) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count;
}
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}
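
/*
 * Seed the in-memory read extent tree from the on-disk extent stored in
 * the inode block. Inodes that may not use the cache get their on-disk
 * largest extent dropped instead.
 */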
void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!__may_extent_tree(inode, EX_READ)) {
		/* drop largest read extent */
		if (i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		set_inode_flag(inode, FI_NO_EXTENT);
		return;
	}

	et = __grab_extent_tree(inode, EX_READ);

	get_read_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt) || !ei.len)
		goto skip;

	if (IS_DEVICE_ALIASING(inode)) {
		et->largest = ei;
		goto skip;
	}

	en = __attach_extent_node(sbi, et, &ei, NULL,
				&et->root.rb_root.rb_node, true);
	if (en) {
		et->largest = en->ei;
		et->cached_en = en;

		spin_lock(&eti->extent_lock);
		list_add_tail(&en->list, &eti->extent_list);
		spin_unlock(&eti->extent_lock);
	}
skip:
	/* Let's drop, if checkpoint got corrupted. */
	if (f2fs_cp_error(sbi)) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
	write_unlock(&et->lock);
}
void f2fs_init_age_extent_tree(struct inode *inode)
{
	if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
		return;
	__grab_extent_tree(inode, EX_BLOCK_AGE);
}
void f2fs_init_extent_tree(struct inode *inode)
{
	/* initialize read cache */
	if (__init_may_extent_tree(inode, EX_READ))
		__grab_extent_tree(inode, EX_READ);

	/* initialize block age cache */
	if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
		__grab_extent_tree(inode, EX_BLOCK_AGE);
}
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en;
	bool ret = false;

	if (!et)
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);

	read_lock(&et->lock);

	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			(pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	if (IS_DEVICE_ALIASING(inode)) {
		ret = false;
		goto out;
	}

	en = __lookup_extent_node(&et->root, et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi, type);
	else
		stat_inc_rbtree_node_hit(sbi, type);

	*ei = en->ei;
	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi, type);
	read_unlock(&et->lock);

	if (type == EX_READ)
		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
	return ret;
}
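
/*
 * Try to extend @prev_ex and/or @next_ex with @ei instead of inserting
 * a new node; if the merge bridges both neighbors, @prev_ex is folded
 * into @next_ex and released. Returns the merged node, or NULL if no
 * merge was possible.
 */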
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.len += ei->len;
		if (et->type == EX_READ)
			next_ex->ei.blk = ei->blk;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	return en;
}
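
/*
 * Insert @ei as a new extent node. When the caller already knows the
 * insertion point from a prior __lookup_extent_node_ret() (with the
 * tree unchanged in between), @insert_p/@insert_parent let it skip
 * the rb-tree descent.
 */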
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	/* look up extent_node in the rb tree */
	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&eti->extent_lock);
	list_add_tail(&en->list, &eti->extent_list);
	et->cached_en = en;
	spin_unlock(&eti->extent_lock);
	return en;
}
static unsigned int __destroy_extent_node(struct inode *inode,
					enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int nr_shrink = type == EX_READ ?
				READ_EXTENT_CACHE_SHRINK_NUMBER :
				AGE_EXTENT_CACHE_SHRINK_NUMBER;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	while (atomic_read(&et->node_cnt)) {
		write_lock(&et->lock);
		node_cnt += __free_extent_tree(sbi, et, nr_shrink);
		write_unlock(&et->lock);
	}

	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));

	return node_cnt;
}
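
/*
 * Core update path: punch the range [fofs, fofs + len) out of the tree,
 * splitting boundary extents where the remaining pieces are worth
 * keeping, then re-insert (or merge) the new mapping. For EX_READ a
 * zero tei->blk means pure invalidation; for EX_BLOCK_AGE a zero
 * tei->last_blocks does.
 */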
static void __update_extent_tree_range(struct inode *inode,
			struct extent_info *tei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int fofs = tei->fofs, len = tei->len;
	unsigned int end = fofs + len;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	if (type == EX_READ)
		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
						tei->blk, 0);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
						tei->age, tei->last_blocks);

	write_lock(&et->lock);

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
			write_unlock(&et->lock);
			return;
		}

		prev = et->largest;
		dei.len = 0;

		/*
		 * drop largest extent before lookup, in case it's already
		 * been shrunk from extent tree
		 */
		__drop_largest_extent(et, fofs, len);
	}

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, fofs >= org_end);

		if (fofs > dei.fofs && (type != EX_READ ||
				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
			en->ei.len = fofs - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && (type != EX_READ ||
			(org_end - end >= F2FS_MIN_EXTENT_LEN &&
			atomic_read(&et->node_cnt) <
					sbi->max_read_extent_count))) {
			if (parts) {
				__set_extent_info(&ei,
					end, org_end - end,
					end - dei.fofs + dei.blk, false,
					dei.age, dei.last_blocks,
					type);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				__set_extent_info(&en->ei,
					end, en->ei.len - (end - dei.fofs),
					en->ei.blk + (end - dei.fofs), true,
					dei.age, dei.last_blocks,
					type);
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers regard to tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	if (type == EX_BLOCK_AGE)
		goto update_age_extent_cache;

	/* 3. update extent in read extent cache */
	BUG_ON(type != EX_READ);

	if (tei->blk) {
		__set_extent_info(&ei, fofs, len, tei->blk, false,
				  0, 0, EX_READ);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}
	goto out_read_extent_cache;
update_age_extent_cache:
	if (!tei->last_blocks)
		goto out_read_extent_cache;

	__set_extent_info(&ei, fofs, len, 0, false,
			tei->age, tei->last_blocks, EX_BLOCK_AGE);
	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);
out_read_extent_cache:
	write_unlock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__destroy_extent_node(inode, EX_READ);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
	struct extent_node *en = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
						blkaddr, c_len);

	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return;

	write_lock(&et->lock);

	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (en)
		goto unlock_out;

	__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
	ei.c_len = c_len;

	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
				insert_p, insert_parent, leftmost);
unlock_out:
	write_unlock(&et->lock);
}
#endif
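
/*
 * Weighted moving average of the block age, conceptually
 *	res = new * (100 - weight) / 100 + old * weight / 100
 * e.g. with weight = 30, new = 200, old = 100: 200*0.7 + 100*0.3 = 170.
 * The quotient/remainder split below computes this in 64 bits without
 * overflowing the multiplications.
 */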
static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
						unsigned long long new,
						unsigned long long old)
{
	unsigned int rem_old, rem_new;
	unsigned long long res;
	unsigned int weight = sbi->last_age_weight;

	res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
		+ div_u64_rem(old, 100, &rem_old) * weight;

	if (rem_new)
		res += rem_new * (100 - weight) / 100;
	if (rem_old)
		res += rem_old * weight / 100;

	return res;
}
/* This returns a new age and allocated blocks in ei */
static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
						block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t f_size = i_size_read(inode);
	unsigned long long cur_blocks =
				atomic64_read(&sbi->allocated_data_blocks);
	struct extent_info tei = *ei;	/* only fofs and len are valid */

	/*
	 * When I/O is not aligned to PAGE_SIZE, an update will hit the last
	 * file block even in sequential writes, so don't record an age for
	 * the newly-written last file block here.
	 */
	if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
			blkaddr == NEW_ADDR)
		return -EINVAL;

	if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
		unsigned long long cur_age;

		if (cur_blocks >= tei.last_blocks)
			cur_age = cur_blocks - tei.last_blocks;
		else
			/* allocated_data_blocks overflow */
			cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;

		if (tei.age)
			ei->age = __calculate_block_age(sbi, cur_age, tei.age);
		else
			ei->age = cur_age;
		ei->last_blocks = cur_blocks;
		WARN_ON(ei->age > cur_blocks);
		return 0;
	}

	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);

	/* the data block was allocated for the first time */
	if (blkaddr == NEW_ADDR)
		goto out;

	if (__is_valid_data_blkaddr(blkaddr) &&
	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EINVAL;
out:
	/*
	 * init block age with zero, this can happen when the block age extent
	 * was reclaimed due to memory constraint or system reboot
	 */
	ei->age = 0;
	ei->last_blocks = cur_blocks;
	return 0;
}
static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
{
	struct extent_info ei = {};

	if (!__may_extent_tree(dn->inode, type))
		return;

	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	ei.len = 1;

	if (type == EX_READ) {
		if (dn->data_blkaddr == NEW_ADDR)
			ei.len = 0;
		else
			ei.blk = dn->data_blkaddr;
	} else if (type == EX_BLOCK_AGE) {
		if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
			return;
	}
	__update_extent_tree_range(dn->inode, &ei, type);
}
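
/*
 * Shrinker backend: first reclaim whole zombie trees (trees whose inode
 * has been evicted), then evict individual extent nodes from the global
 * LRU list until @nr_shrink entries have been freed. Both phases use
 * trylock so that shrinking never blocks on cache users.
 */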
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
					enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!atomic_read(&eti->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et,
					nr_shrink - node_cnt - tree_cnt);
			write_unlock(&et->lock);
		}

		if (atomic_read(&et->node_cnt))
			goto unlock_out;

		list_del_init(&et->list);
		radix_tree_delete(&eti->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&eti->total_ext_tree);
		atomic_dec(&eti->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&eti->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&eti->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&eti->extent_list))
			break;
		en = list_first_entry(&eti->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &eti->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&eti->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&eti->extent_lock);
	}
	spin_unlock(&eti->extent_lock);

unlock_out:
	mutex_unlock(&eti->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);

	return node_cnt + tree_cnt;
}
/* read extent cache operations */
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_READ))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
}
bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
				block_t *blkaddr)
{
	struct extent_info ei = {};

	if (!f2fs_lookup_read_extent_cache(inode, index, &ei))
		return false;
	*blkaddr = ei.blk + index - ei.fofs;
	return true;
}
void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_READ);
}
void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
		.blk = blkaddr,
	};

	if (!__may_extent_tree(dn->inode, EX_READ))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_READ);
}
unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
}
/* block age extent cache operations */
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_BLOCK_AGE))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
}
void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_BLOCK_AGE);
}
void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
	};

	if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
}
unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, AGE_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
}
void f2fs_destroy_extent_node(struct inode *inode)
{
	__destroy_extent_node(inode, EX_READ);
	__destroy_extent_node(inode, EX_BLOCK_AGE);
}
static void __drop_extent_tree(struct inode *inode, enum extent_type type)
{
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	bool updated = false;

	if (!__may_extent_tree(inode, type))
		return;

	write_lock(&et->lock);
	if (type == EX_READ) {
		set_inode_flag(inode, FI_NO_EXTENT);
		if (et->largest.len) {
			et->largest.len = 0;
			updated = true;
		}
	}
	write_unlock(&et->lock);

	__destroy_extent_node(inode, type);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
void f2fs_drop_extent_tree(struct inode *inode)
{
	__drop_extent_tree(inode, EX_READ);
	__drop_extent_tree(inode, EX_BLOCK_AGE);
}
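
/*
 * Called on inode eviction. A still-linked inode with cached extents
 * only has its tree parked on the zombie list (for the shrinker to
 * reclaim later); otherwise the tree is torn down and removed from the
 * radix tree immediately.
 */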
static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&eti->extent_tree_lock);
		list_add_tail(&et->list, &eti->zombie_list);
		atomic_inc(&eti->total_zombie_tree);
		mutex_unlock(&eti->extent_tree_lock);
		return;
	}

	/* free all extent info belong to this extent tree */
	node_cnt = __destroy_extent_node(inode, type);

	/* delete extent tree entry in radix tree */
	mutex_lock(&eti->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&eti->total_ext_tree);
	mutex_unlock(&eti->extent_tree_lock);

	F2FS_I(inode)->extent_tree[type] = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
}
void f2fs_destroy_extent_tree(struct inode *inode)
{
	__destroy_extent_tree(inode, EX_READ);
	__destroy_extent_tree(inode, EX_BLOCK_AGE);
}
static void __init_extent_tree_info(struct extent_tree_info *eti)
{
	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
	mutex_init(&eti->extent_tree_lock);
	INIT_LIST_HEAD(&eti->extent_list);
	spin_lock_init(&eti->extent_lock);
	atomic_set(&eti->total_ext_tree, 0);
	INIT_LIST_HEAD(&eti->zombie_list);
	atomic_set(&eti->total_zombie_tree, 0);
	atomic_set(&eti->total_ext_node, 0);
}
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
	__init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);

	/* initialize for block age extents */
	atomic64_set(&sbi->allocated_data_blocks, 0);
	sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
	sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
	sbi->last_age_weight = LAST_AGE_WEIGHT;
	sbi->max_read_extent_count = DEF_MAX_READ_EXTENT_COUNT;
}
int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}