// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussions at the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met (e.g. the
 * reservation space warning) and to provide extent-level locking. The
 * delayed extent tree is the first step toward this goal. It was
 * originally built by Yongqiang Yang. At that time it was called the
 * delay extent tree, and its only goal was to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support. That is why it was still
 * called the delay extent tree in the first commit. But to better
 * reflect what it does, it has since been renamed to the extent status
 * tree.
 *
 * Step1:
 * Currently the first step has been done. All delayed extents are
 * tracked in the tree. The tree maintains a delayed extent from when a
 * delayed allocation is issued until the delayed extent is written out
 * or invalidated. Therefore the implementation of fiemap and bigalloc
 * is simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comments describe the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree. Hence, the single extent cache can be
 * removed because the extent status tree can do a better job. Extents
 * in the status tree are loaded on demand, so the extent status tree
 * may not contain all of the extents in a file. Meanwhile we define a
 * shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree makes the status tree consume too much memory.
 * Written/unwritten/hole extents in the tree will be reclaimed by this
 * shrinker when we are under high memory pressure. Delayed extents will
 * not be reclaimed because fiemap, bigalloc, and seek_data/hole need
 * them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement the extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which results in complicated, buggy, and
 * inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know
 * whether a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 * -- FIEMAP
 *	FIEMAP looks up the page cache to distinguish delayed
 *	allocations from holes.
 *
 * -- SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * -- bigalloc
 *	bigalloc looks up the page cache to figure out whether a block
 *	is already under delayed allocation, in order to determine
 *	whether a quota reservation is needed for the cluster.
 *
 * -- writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped. If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc, and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (i.e. belongs to a delayed extent)
 * by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * -- extent
 *	An extent is a range of blocks that are contiguous both
 *	logically and physically. Unlike an extent in the extent tree,
 *	this extent is an in-memory struct; there is no corresponding
 *	on-disk data. There is no limit on the length of an extent, so
 *	an extent can contain as many blocks as are contiguous logically
 *	and physically.
 *
 * -- extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status. The extents in the
 *	tree are ordered by logical block number.
 *
 * -- operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 * -- race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * -- memory consumption
 *	A fragmented extent tree makes the extent status tree consume
 *	too much memory. Hence, we reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * -- overhead
 *	1. There is a cached extent for write accesses, so if writes are
 *	   not very random, add-space operations take O(1) time.
 *
 * -- gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	   more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 * -- Refactor delayed space reservation
 *
 * -- Extent-level locking
 */
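
/*
 * A minimal usage sketch (not part of the original file, guarded out of
 * compilation): how one block moves through the tree under delayed
 * allocation. The caller, block numbers, and pblk are hypothetical.
 */
#if 0
static void example_delalloc_lifecycle(struct inode *inode)
{
	/* 1. Buffered write: logical block 7 is tracked as delayed. */
	ext4_es_insert_delayed_block(inode, 7, false);

	/* 2. Writeback allocates pblk 9000; the status becomes written. */
	ext4_es_insert_extent(inode, 7, 1, 9000, EXTENT_STATUS_WRITTEN);

	/* 3. Truncate/punch: the range is dropped from the tree. */
	ext4_es_remove_extent(inode, 7, 1);
}
#endif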

static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}

/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}

/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false; /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}
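
/*
 * Illustrative sketch (not part of the original file): callers pass a
 * matching function such as ext4_es_is_delonly(), as __revise_pending()
 * does below; the block range here is made up.
 */
#if 0
static bool example_range_has_delonly(struct inode *inode)
{
	/* true if any block in [0, 63] is delayed and not unwritten */
	return ext4_es_scan_range(inode, &ext4_es_is_delonly, 0, 63);
}
#endif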

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim
	 * them.
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged
 * Condition:
 *  - logical block number is contiguous
 *  - physical block number is contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that the delayed extent is not also unwritten */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
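
/*
 * Worked example (illustrative, not from the original source): a written
 * extent [0/8) at pblk 100 and a written extent [8/4) at pblk 108 are
 * logically and physically contiguous with equal status, so they can
 * merge into [0/12) at pblk 100.  If the second extent instead started
 * at pblk 200, the physical contiguity test above would fail and both
 * extents would be kept separate.
 */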

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of the whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we can't access the direct/indirect tree from outside.  It
	 * would be too ugly to define this function in indirect.c.
	 */
	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				" delayed and written which can potentially "
				" cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
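
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller records a mapping and then resolves a block through the tree;
 * the block numbers and pblk are made up.
 */
#if 0
static void example_es_usage(struct inode *inode)
{
	struct extent_status es;

	/* record that logical blocks [100, 104) map to pblk 5000 */
	ext4_es_insert_extent(inode, 100, 4, 5000, EXTENT_STATUS_WRITTEN);

	/* a later lookup of block 102 is answered from the tree */
	if (ext4_es_lookup_extent(inode, 102, NULL, &es))
		es_debug("block 102 -> pblk %llu\n",
			 ext4_es_pblock(&es) + 102 - es.es_lblk);
}
#endif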

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache firstly */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}

struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};

/*
 * init_rsvd - initialize reserved count data before removing block range
 *	       in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note the first delonly block in the range has not
	 * been found, record the extent containing the block to the left of
	 * the region to be removed, if any, and note that there's no partial
	 * cluster to track
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (!rc->first_do_lblk_found) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current cluster starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
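
/*
 * Worked example (illustrative): with a cluster ratio of 4, counting a
 * delonly extent covering blocks [2, 9] counts the partial leading
 * cluster 0 (blocks 2-3) because the extent reaches its boundary, counts
 * whole cluster 1 (blocks 4-7), and leaves blocks 8-9 tracked in
 * rc->partial in case the next extent continues cluster 2.
 */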

/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found.  If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	if (pr && lclu < pr->lclu)
		return pr;
	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}
	return NULL;
}

/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node, struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any.  Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded.  They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
				if (!node)
					break;
				pr = rb_entry(node, struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}

/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed. Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;
retry:
	err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
							128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
				   &orig_es, &rc);
		goto out;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}

/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed. Returns 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
	return err;
}

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time * 3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk * 3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr,
			max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
				  GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		goto err2;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err3;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err4;

	return 0;
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents. Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;

	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}

/*
 * Called to support EXT4_IOC_CLEAR_ES_CACHE.  We can only remove
 * discretionary entries from the extent status cache.  (Some entries
 * must be present for proper operations.)
 */
void ext4_clear_inode_es(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct extent_status *es;
	struct ext4_es_tree *tree;
	struct rb_node *node;

	write_lock(&ei->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;
	tree->cache_es = NULL;
	node = rb_first(&tree->root);
	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(node);
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
		}
	}
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
	write_unlock(&ei->i_es_lock);
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
					sizeof(struct pending_reservation),
					0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}

void ext4_init_pending_tree(struct ext4_pending_tree *tree)
{
	tree->root = RB_ROOT;
}

/*
 * __get_pending - retrieve a pointer to a pending reservation
 *
 * @inode - file containing the pending cluster reservation
 * @lclu - logical cluster of interest
 *
 * Returns a pointer to a pending reservation if it's a member of
 * the set, and NULL if not.  Must be called holding i_es_lock.
 */
static struct pending_reservation *__get_pending(struct inode *inode,
						 ext4_lblk_t lclu)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr = NULL;

	tree = &EXT4_I(inode)->i_pending_tree;
	node = (&tree->root)->rb_node;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else if (lclu == pr->lclu)
			return pr;
	}
	return NULL;
}

/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *                    pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
 */
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct pending_reservation *pr;
	ext4_lblk_t lclu;
	int ret = 0;

	lclu = EXT4_B2C(sbi, lblk);
	/* search to find parent for insertion */
	while (*p) {
		parent = *p;
		pr = rb_entry(parent, struct pending_reservation, rb_node);

		if (lclu < pr->lclu) {
			p = &(*p)->rb_left;
		} else if (lclu > pr->lclu) {
			p = &(*p)->rb_right;
		} else {
			/* pending reservation already inserted */
			goto out;
		}
	}

	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
	if (pr == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pr->lclu = lclu;

	rb_link_node(&pr->rb_node, parent, p);
	rb_insert_color(&pr->rb_node, &tree->root);

out:
	return ret;
}

/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Returns successfully if pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
	if (pr != NULL) {
		tree = &EXT4_I(inode)->i_pending_tree;
		rb_erase(&pr->rb_node, &tree->root);
		kmem_cache_free(ext4_pending_cachep, pr);
	}
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending.
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	__remove_pending(inode, lblk);
	write_unlock(&ei->i_es_lock);
}

/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *                   on it
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	bool ret;

	read_lock(&ei->i_es_lock);
	ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
	read_unlock(&ei->i_es_lock);

	return ret;
}

/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 *
 * Returns 0 on success, negative error code on failure.
 */
int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
				 bool allocated)
{
	struct extent_status newes;
	int err = 0;

	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
		 lblk, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = 1;
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);

	err = __es_remove_extent(inode, lblk, lblk, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err != 0)
		goto error;

	if (allocated)
		__insert_pending(inode, lblk);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);
	ext4_print_pending_tree(inode);

	return err;
}
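
/*
 * Illustrative note (not from the original source): @allocated == true
 * additionally records a pending reservation for the containing cluster,
 * marking it as already allocated while still holding a delayed block;
 * get_rsvd() later reduces the number of reservations reported as
 * released by one for each such pending reservation it finds.
 */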

/*
 * __es_delayed_clu - count number of clusters containing blocks that
 *                    are delayed only
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end.  Any
 * cluster or part of a cluster within the range and containing a delayed
 * and not unwritten block within the range is counted as a whole cluster.
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
				     ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	unsigned long long last_counted_lclu;
	unsigned int n = 0;

	/* guaranteed to be unequal to any ext4_lblk_t value */
	last_counted_lclu = ~0ULL;

	es = __es_tree_search(&tree->root, start);

	while (es && (es->es_lblk <= end)) {
		if (ext4_es_is_delonly(es)) {
			if (es->es_lblk <= start)
				first_lclu = EXT4_B2C(sbi, start);
			else
				first_lclu = EXT4_B2C(sbi, es->es_lblk);

			if (ext4_es_end(es) >= end)
				last_lclu = EXT4_B2C(sbi, end);
			else
				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

			if (first_lclu == last_counted_lclu)
				n += last_lclu - first_lclu;
			else
				n += last_lclu - first_lclu + 1;
			last_counted_lclu = last_lclu;
		}
		node = rb_next(&es->rb_node);
		if (!node)
			break;
		es = rb_entry(node, struct extent_status, rb_node);
	}

	return n;
}

/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed and not unwritten
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
				 ext4_lblk_t len)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t end;
	unsigned int n;

	if (len == 0)
		return 0;

	end = lblk + len - 1;
	WARN_ON(end < lblk);

	read_lock(&ei->i_es_lock);

	n = __es_delayed_clu(inode, lblk, end);

	read_unlock(&ei->i_es_lock);

	return n;
}
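
/*
 * Worked example (illustrative): with a cluster ratio of 4, delonly
 * extents covering blocks [2, 4] and [6, 7] both touch cluster 1;
 * last_counted_lclu keeps that cluster from being counted twice, so the
 * result is 2 clusters (0 and 1) rather than 3.
 */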

/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len  - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t end = lblk + len - 1;
	ext4_lblk_t first, last;
	bool f_del = false, l_del = false;

	if (len == 0)
		return;

	/*
	 * Two cases - block range within single cluster and block range
	 * spanning two or more clusters.  Note that a cluster belonging
	 * to a range starting and/or ending on a cluster boundary is treated
	 * as if it does not contain a delayed extent.  The new range may
	 * have allocated space for previously delayed blocks out to the
	 * cluster boundary, requiring that any pre-existing pending
	 * reservation be canceled.  Because this code only looks at blocks
	 * outside the range, it should revise pending reservations
	 * correctly even if the extent represented by the range can't be
	 * inserted in the extents status tree due to ENOSPC.
	 */

	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			__insert_pending(inode, first);
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del)
				__insert_pending(inode, last);
			else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del)
			__insert_pending(inode, first);
		else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del)
			__insert_pending(inode, last);
		else
			__remove_pending(inode, last);
	}
}
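
/*
 * Worked example (illustrative): with a cluster ratio of 4, allocating
 * written blocks [5, 6] leaves blocks 4 and 7 outside the range but
 * inside the same cluster.  If block 4 (or, failing that, block 7) still
 * holds a delonly block, the cluster keeps or gains a pending
 * reservation; otherwise any pre-existing pending reservation for it is
 * cancelled.
 */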