// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>
/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delay extent tree is the first step towards this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, whose only goal was to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it was still
 * called the delay extent tree at the first commit.  But to better convey
 * what it does, it has since been renamed to extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree maintains a delayed extent from when a
 * delayed allocation is issued until the delayed extent is written out
 * or invalidated.  Therefore the implementations of fiemap and bigalloc
 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on-demand.  Therefore, the extent status
 * tree may not contain all of the extents in a file.  Meanwhile we define
 * a shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much memory.
 * written/unwritten/hole extents in the tree will be reclaimed by this
 * shrinker when we are under high memory pressure.  Delayed extents will
 * not be reclaimed because fiemap, bigalloc, and seek_data/hole need them.
 */
/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement the extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache; this has several deficiencies - complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know whether
 * a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they worked without the extent status tree.
 *
 *   --	FIEMAP
 *	FIEMAP looks up the page cache to identify delayed allocations
 *	from holes.
 *
 *   --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not, to determine whether
 *	quota reservation is needed for the cluster.
 *
 *   --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, then it is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) or
 * not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike an extent in the extent tree, this extent is
 *	an in-memory struct; there is no corresponding on-disk data.  There
 *	is no limit on the length of an extent, so an extent can contain as
 *	many blocks as are contiguous logically and physically.
 *
 *   --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status.  The extents in the
 *	tree are ordered by logical block number.
 *
 *   --	operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 *   --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   --	memory consumption
 *	A fragmented extent tree will make the extent status tree cost too
 *	much memory.  Hence, we will reclaim written/unwritten/hole extents
 *	from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   --	overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding-space operations run in O(1) time.
 *
 *   --	gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	easier to understand.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
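
/*
 * A minimal usage sketch of the API implemented below (illustrative
 * only; the actual call sites live elsewhere, e.g. inode.c and file.c,
 * and the status flags are defined in extents_status.h):
 *
 *	struct extent_status es;
 *
 *	// record a 16-block written mapping at lblk 100 -> pblk 2048
 *	ext4_es_insert_extent(inode, 100, 16, 2048, EXTENT_STATUS_WRITTEN);
 *
 *	// later, resolve one of its blocks without touching the disk
 *	if (ext4_es_lookup_extent(inode, 108, &es))
 *		// es now describes [100/16) at pblk 2048
 *
 *	// and drop the range again, e.g. on truncate
 *	ext4_es_remove_extent(inode, 100, 16);
 */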
static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}
void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}
void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}
#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif
static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}
/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
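
/*
 * For example, given a tree holding [10/5) and [30/4): searching for
 * lblk 12 returns [10/5) (it covers 12), searching for lblk 20 returns
 * [30/4) (the next extent after the gap), and searching for lblk 40
 * returns NULL because nothing lies at or beyond it.
 */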
/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
 * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				 ext4_lblk_t lblk, ext4_lblk_t end,
				 struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
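
/*
 * A hedged caller sketch (illustrative only; the real SEEK_DATA/SEEK_HOLE
 * logic lives in file.c):
 *
 *	struct extent_status es;
 *
 *	ext4_es_find_delayed_extent_range(inode, start, last, &es);
 *	if (es.es_len == 0)
 *		// no delayed extent in [start, last]
 *	else
 *		// es describes the first delayed extent at or covering start
 */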
static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}
static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}
static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;
	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim them.
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}
static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}
/*
 * Check whether or not two extents can be merged.
 * Condition:
 *  - logical block number is contiguous
 *  - physical block number is contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
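
/*
 * For example, two written extents [0/8) at pblk 100 and [8/8) at pblk
 * 108 can be merged into [0/16) at pblk 100, while [0/8) at pblk 100
 * and [8/8) at pblk 200 cannot, since their physical blocks are not
 * contiguous.  Hole and (non-unwritten) delayed extents only need the
 * logical blocks to be contiguous, because they carry no physical block.
 */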
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}
static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}
#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of the whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}
static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping because
	 * the 'Indirect' structure is defined in indirect.c.  So we couldn't
	 * access the direct/indirect tree from outside.  It is too dirty to
	 * define this function in the indirect.c file.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * an indirect-based file doesn't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
}
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif
static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}
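
/*
 * Note on the insert path above: while walking down the rbtree we first
 * try to coalesce the new range into an adjacent extent of the same
 * status, and only allocate a new node when no merge is possible, so
 * mostly-sequential writes keep the tree small.
 */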
/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				" delayed and written which can potentially "
				" cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
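
/*
 * Note on the -ENOMEM handling above: an allocation failure first kicks
 * __es_shrink() to reclaim reclaimable (non-delayed) extents and then
 * retries; if memory is still unavailable, the error is swallowed for
 * non-delayed extents, since those can always be re-read from the
 * on-disk extent tree later, while losing a delayed extent would lose
 * the only record of the reservation.
 */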
/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}
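
/*
 * A hedged usage note: ext4_es_cache_extent() is the insert-if-absent
 * variant used when pre-populating the tree from on-disk metadata (for
 * example, extent precaching); it deliberately refuses to overwrite an
 * existing entry, which may carry newer state than the disk does.
 */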
/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 on found, 0 on not
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		stats->es_stats_cache_hits++;
	} else {
		stats->es_stats_cache_misses++;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
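
/*
 * A minimal caller sketch (illustrative): per the comment above,
 * ext4_map_blocks() consults the status tree first and only walks the
 * on-disk extent tree on a miss:
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
 *		// translate es into map->m_pblk/m_len, no disk I/O
 *	} else {
 *		// fall back to reading the on-disk extent tree
 *	}
 */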
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;

retry:
	err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
							128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}
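
/*
 * A worked example of the splitting above: with a written extent
 * [0/16) at pblk 100 in the tree, removing blocks 4..7 leaves the
 * shortened head [0/4) at pblk 100 (len1 == 4) and inserts the tail
 * [8/8) at pblk 108 (len2 == 8), i.e. pblk(orig) + orig_len - len2.
 */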
/*
 * ext4_es_remove_extent() removes a range of blocks from an extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing the inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}
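
/*
 * Note on the statistics above: scan time and shrunk counts are kept as
 * exponentially weighted moving averages with a 3/4 decay, i.e.
 * new_avg = (sample + 3 * old_avg) / 4, so a single slow scan only
 * nudges the reported average by a quarter of its excess.
 */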
static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}
static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}
int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lu/%lu cache hits/misses\n",
		   es_stats->es_stats_cache_hits,
		   es_stats->es_stats_cache_misses);
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr,
			max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}
int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	sbi->s_es_stats.es_stats_cache_hits = 0;
	sbi->s_es_stats.es_stats_cache_misses = 0;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err1;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err2;

	return 0;

err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	return err;
}
void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}
/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents. Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;
	node = &es->rb_node;
	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to use
		 * them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}
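
/*
 * Note on the wrap-around above: scanning resumes from i_es_shrink_lblk
 * (where the previous pass ran out of budget) to the end of the file,
 * and only wraps back over [0, start - 1) if that first leg hit the end
 * of the tree with scan budget left, so every extent gets visited once
 * per full cycle.
 */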