/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>
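
/*
 * A quick refresher on the on-disk layout this file manipulates (see
 * ext4_extents.h for the authoritative definitions): every node of the
 * extent tree begins with a struct ext4_extent_header.  Leaf nodes
 * (depth 0) hold struct ext4_extent entries mapping a logical range
 * (ee_block, ee_len) to a 48-bit physical start block; interior nodes
 * hold struct ext4_extent_idx entries pointing one level down.  The
 * root lives in the 60 bytes of i_data in the inode body; every other
 * node occupies a whole filesystem block.
 */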
static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     struct ext4_map_blocks *map,
			     int split_flag,
			     int flags);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}
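
/*
 * Typical use of the helper above (see ext4_ext_rm_leaf()): before
 * modifying a leaf the caller computes the credits it will need and
 * calls ext4_ext_truncate_extend_restart().  A zero return means the
 * handle already has, or was extended to, enough credits; a negative
 * return (including -EAGAIN after the transaction was restarted) is
 * propagated so the caller can back out and revalidate its path.
 */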
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth;
		struct ext4_extent *ex;

		depth = path->p_depth;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}
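
/*
 * Concrete numbers for the helpers above, assuming the common 4KiB
 * block size: the header is 12 bytes and both entry types are 12
 * bytes, so a full block holds (4096 - 12) / 12 = 340 extents or
 * indexes, while the in-inode root (60 bytes of i_data) holds
 * (60 - 12) / 12 = 4 entries.
 */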
/*
 * Calculate the number of metadata blocks needed
 * to allocate @lblock.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
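
/*
 * Worked example for the contiguous case above, assuming 4KiB blocks
 * (idxs = 340): appending delayed-allocation blocks one at a time
 * charges one extra metadata block at blocks 340, 680, ... for a new
 * leaf, one more every 340*340 blocks for a level-1 index, and so on.
 * Everything in between costs nothing, because the running count in
 * i_da_metadata_calc_len shares the index blocks already charged.
 */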
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)
int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}
		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}
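
/*
 * Loop invariant for the search above: every entry left of l has
 * ei_block <= block and every entry right of r has ei_block > block,
 * so when l passes r, l - 1 is the rightmost index that can cover
 * block.  E.g. with indexes starting at logical blocks 0, 100 and 200,
 * a search for block 150 leaves p_idx on the entry for 100.
 */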
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
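
/*
 * Note on the growth scheme above: the root always stays in the inode
 * body.  Growing copies the current root into a freshly allocated
 * block and turns the in-inode root into a single index pointing at
 * it, so the tree gains one level without any existing block moving,
 * and eh_depth in the inode simply increments (0 -> 1 on first grow).
 */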
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags,
					    path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	put_bh(bh);
	return 0;
}
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}
int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
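
/*
 * Example of the merge test above: extents A = (ee_block 100, len 50,
 * pblock 5000) and B = (ee_block 150, len 10, pblock 5050) are
 * mergeable because B starts exactly where A ends both logically and
 * physically; change B's pblock to 6000 and the final check fails.
 */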
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}
/*
 * This function tries to merge the @ex extent to neighbours in the tree.
 * return 1 if merge left else 0.
 */
static int ext4_ext_try_to_merge(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;
	int ret = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		ret = ext4_ext_try_to_merge_right(inode, path, ex);

	return ret;
}
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(inode, path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_pblock(newext),
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}
static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			       ext4_lblk_t num, ext_prepare_callback func,
			       void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCKS) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);
		}

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
		err = func(inode, next, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start)
{
	struct ext4_ext_cache *cex;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCKS;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0);
}
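
/*
 * Example of gap caching above: with a lone extent mapping logical
 * blocks 100-199, a lookup for block 50 caches the gap [50, 100) and
 * a lookup for block 250 caches [200, next allocated block), so
 * repeated reads of a hole can be answered without walking the tree
 * again.
 */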
/*
 * ext4_ext_check_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * cache extent pointer.  If the cached extent is a hole,
 * this routine should be used instead of
 * ext4_ext_in_cache if the calling function needs to
 * know the size of the hole.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
				struct ext4_ext_cache *ex)
{
	struct ext4_ext_cache *cex;
	struct ext4_sb_info *sbi;
	int ret = 0;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	sbi = EXT4_SB(inode->i_sb);

	/* has cache valid data? */
	if (cex->ec_len == 0)
		goto errout;

	if (in_range(block, cex->ec_block, cex->ec_len)) {
		memcpy(ex, cex, sizeof(struct ext4_ext_cache));
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = 1;
	}
errout:
	if (!ret)
		sbi->extent_cache_misses++;
	else
		sbi->extent_cache_hits++;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}
/*
 * ext4_ext_in_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * extent pointer.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache cex;
	int ret = 0;

	if (ext4_ext_check_cache(inode, block, &cex)) {
		ex->ee_block = cpu_to_le32(cex.ec_block);
		ext4_ext_store_pblock(ex, cex.ec_start);
		ex->ee_len = cpu_to_le16(cex.ec_len);
		ret = 1;
	}

	return ret;
}
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When the actual path is passed, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf tree, no
			 *  need to account for leaf block credit
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}
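
/*
 * Rough numbers for the fast path above: EXT4_META_TRANS_BLOCKS()
 * already covers the bitmap/group-descriptor/superblock/inode
 * updates, so an insert that fits into the existing leaf reserves
 * only a couple of blocks beyond that, while the
 * ext4_chunk_trans_blocks() fallback also budgets for a split at
 * every level of the tree.
 */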
/*
 * How many index/leaf blocks need to change/allocate to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then
 * in the worst case each index/leaf at every tree level needs to be
 * changed; if inserting the new extent splits the tree, the old
 * index/leaf blocks need to be updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext4_ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* head removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = to - from;
		start = ext4_ext_pblock(ex);

		ext_debug("free first %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);
	} else {
		printk(KERN_INFO "strange request: removal(2) "
			"%u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}
2247 * ext4_ext_rm_leaf() Removes the extents associated with the
2248 * blocks appearing between "start" and "end", and splits the extents
2249 * if "start" and "end" appear in the same extent
2251 * @handle: The journal handle
2252 * @inode: The files inode
2253 * @path: The path to the leaf
2254 * @start: The first block to remove
2255 * @end: The last block to remove
2258 ext4_ext_rm_leaf(handle_t
*handle
, struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t start,
                ext4_lblk_t end)
{
        int err = 0, correct_index = 0;
        int depth = ext_depth(inode), credits;
        struct ext4_extent_header *eh;
        ext4_lblk_t a, b, block;
        unsigned num;
        ext4_lblk_t ex_ee_block;
        unsigned short ex_ee_len;
        unsigned uninitialized = 0;
        struct ext4_extent *ex;
        struct ext4_map_blocks map;

        /* the header must be checked already in ext4_ext_remove_space() */
        ext_debug("truncate since %u in leaf\n", start);
        if (!path[depth].p_hdr)
                path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
        eh = path[depth].p_hdr;
        if (unlikely(path[depth].p_hdr == NULL)) {
                EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
                return -EIO;
        }
        /* find where to start removing */
        ex = EXT_LAST_EXTENT(eh);

        ex_ee_block = le32_to_cpu(ex->ee_block);
        ex_ee_len = ext4_ext_get_actual_len(ex);

        while (ex >= EXT_FIRST_EXTENT(eh) &&
                        ex_ee_block + ex_ee_len > start) {

                if (ext4_ext_is_uninitialized(ex))
                        uninitialized = 1;
                else
                        uninitialized = 0;

                ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
                         uninitialized, ex_ee_len);
                path[depth].p_ext = ex;

                a = ex_ee_block > start ? ex_ee_block : start;
                b = ex_ee_block+ex_ee_len - 1 < end ?
                        ex_ee_block+ex_ee_len - 1 : end;

                ext_debug("  border %u:%u\n", a, b);

                /* If this extent is beyond the end of the hole, skip it */
                if (end <= ex_ee_block) {
                        ex--;
                        ex_ee_block = le32_to_cpu(ex->ee_block);
                        ex_ee_len = ext4_ext_get_actual_len(ex);
                        continue;
                } else if (a != ex_ee_block &&
                           b != ex_ee_block + ex_ee_len - 1) {
                        /*
                         * If this is a truncate, then this condition should
                         * never happen because at least one of the end points
                         * needs to be on the edge of the extent.
                         */
                        if (end == EXT_MAX_BLOCKS - 1) {
                                ext_debug("  bad truncate %u:%u\n",
                                                start, end);
                                block = 0;
                                num = 0;
                                err = -EIO;
                                goto out;
                        }
                        /*
                         * else this is a hole punch, so the extent needs to
                         * be split since neither edge of the hole is on the
                         * extent edge
                         */
                        else {
                                map.m_pblk = ext4_ext_pblock(ex);
                                map.m_lblk = ex_ee_block;
                                map.m_len = b - ex_ee_block;

                                err = ext4_split_extent(handle,
                                        inode, path, &map, 0,
                                        EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
                                        EXT4_GET_BLOCKS_PRE_IO);

                                if (err < 0)
                                        goto out;

                                ex_ee_len = ext4_ext_get_actual_len(ex);

                                b = ex_ee_block+ex_ee_len - 1 < end ?
                                        ex_ee_block+ex_ee_len - 1 : end;

                                /* Then remove tail of this extent */
                                block = ex_ee_block;
                                num = a - block;
                        }
                } else if (a != ex_ee_block) {
                        /* remove tail of the extent */
                        block = ex_ee_block;
                        num = a - block;
                } else if (b != ex_ee_block + ex_ee_len - 1) {
                        /* remove head of the extent */
                        block = b;
                        num = ex_ee_block + ex_ee_len - b;

                        /*
                         * If this is a truncate, this condition
                         * should never happen
                         */
                        if (end == EXT_MAX_BLOCKS - 1) {
                                ext_debug("  bad truncate %u:%u\n",
                                                start, end);
                                err = -EIO;
                                goto out;
                        }
                } else {
                        /* remove whole extent: excellent! */
                        block = ex_ee_block;
                        num = 0;
                        if (a != ex_ee_block) {
                                ext_debug("  bad truncate %u:%u\n",
                                                start, end);
                                err = -EIO;
                                goto out;
                        }

                        if (b != ex_ee_block + ex_ee_len - 1) {
                                ext_debug("  bad truncate %u:%u\n",
                                                start, end);
                                err = -EIO;
                                goto out;
                        }
                }

                /*
                 * 3 for leaf, sb, and inode plus 2 (bmap and group
                 * descriptor) for each block group; assume two block
                 * groups plus ex_ee_len/blocks_per_block_group for
                 * the worst case
                 */
                credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
                if (ex == EXT_FIRST_EXTENT(eh)) {
                        correct_index = 1;
                        credits += (ext_depth(inode)) + 1;
                }
                credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

                err = ext4_ext_truncate_extend_restart(handle, inode, credits);
                if (err)
                        goto out;

                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto out;

                err = ext4_remove_blocks(handle, inode, ex, a, b);
                if (err)
                        goto out;

                if (num == 0) {
                        /* this extent is removed; mark slot entirely unused */
                        ext4_ext_store_pblock(ex, 0);
                } else if (block != ex_ee_block) {
                        /*
                         * If this was a head removal, then we need to update
                         * the physical block since it is now at a different
                         * location
                         */
                        ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a));
                }

                ex->ee_block = cpu_to_le32(block);
                ex->ee_len = cpu_to_le16(num);
                /*
                 * Do not mark uninitialized if all the blocks in the
                 * extent have been removed.
                 */
                if (uninitialized && num)
                        ext4_ext_mark_uninitialized(ex);

                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto out;

                /*
                 * If the extent was completely released,
                 * we need to remove it from the leaf
                 */
                if (num == 0) {
                        if (end != EXT_MAX_BLOCKS - 1) {
                                /*
                                 * For hole punching, we need to scoot all the
                                 * extents up when an extent is removed so that
                                 * we don't have blank extents in the middle
                                 */
                                memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
                                        sizeof(struct ext4_extent));

                                /* Now get rid of the one at the end */
                                memset(EXT_LAST_EXTENT(eh), 0,
                                        sizeof(struct ext4_extent));
                        }
                        le16_add_cpu(&eh->eh_entries, -1);
                }

                ext_debug("new extent: %u:%u:%llu\n", block, num,
                                ext4_ext_pblock(ex));
                ex--;
                ex_ee_block = le32_to_cpu(ex->ee_block);
                ex_ee_len = ext4_ext_get_actual_len(ex);
        }

        if (correct_index && eh->eh_entries)
                err = ext4_ext_correct_indexes(handle, inode, path);

        /* if this leaf is free, then we should
         * remove it from index block above */
        if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
                err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
        return err;
}
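
/*
 * Example of the border math above (illustrative numbers, not from a
 * real workload): punching blocks 150..199 out of a leaf extent
 * [100, 219] reaches ext4_ext_rm_leaf() as start = 150, end = 200
 * (the punch caller passes one past the last punched block).  Then
 * a = 150 and b = 200, neither on an extent edge, so the extent is
 * first split at block 200 into [100, 199] and [200, 219]; b is then
 * recomputed to 199 and the tail of [100, 199] is trimmed with
 * block = 100, num = a - block = 50, leaving [100, 149] on disk.
 */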
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
        BUG_ON(path->p_idx == NULL);

        if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
                return 0;

        /*
         * if truncate on deeper level happened, it wasn't partial,
         * so we have to consider current index for truncation
         */
        if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
                return 0;
        return 1;
}
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
                                 ext4_lblk_t end)
{
        struct super_block *sb = inode->i_sb;
        int depth = ext_depth(inode);
        struct ext4_ext_path *path;
        handle_t *handle;
        int i, err;

        ext_debug("truncate since %u\n", start);

        /* probably first extent we're gonna free will be last in block */
        handle = ext4_journal_start(inode, depth + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        ext4_ext_invalidate_cache(inode);

        /*
         * We start scanning from right side, freeing all the blocks
         * after i_size and walking into the tree depth-wise.
         */
        depth = ext_depth(inode);
        path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
        if (path == NULL) {
                ext4_journal_stop(handle);
                return -ENOMEM;
        }
        path[0].p_depth = depth;
        path[0].p_hdr = ext_inode_hdr(inode);
        if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
                err = -EIO;
                goto out;
        }
        i = err = 0;

        while (i >= 0 && err == 0) {
                if (i == depth) {
                        /* this is leaf block */
                        err = ext4_ext_rm_leaf(handle, inode, path,
                                        start, end);
                        /* root level has p_bh == NULL, brelse() eats this */
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        i--;
                        continue;
                }

                /* this is index block */
                if (!path[i].p_hdr) {
                        ext_debug("initialize header\n");
                        path[i].p_hdr = ext_block_hdr(path[i].p_bh);
                }

                if (!path[i].p_idx) {
                        /* this level hasn't been touched yet */
                        path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
                        ext_debug("init index ptr: hdr 0x%p, num %d\n",
                                  path[i].p_hdr,
                                  le16_to_cpu(path[i].p_hdr->eh_entries));
                } else {
                        /* we were already here, see at next index */
                        path[i].p_idx--;
                }

                ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
                                i, EXT_FIRST_INDEX(path[i].p_hdr),
                                path[i].p_idx);
                if (ext4_ext_more_to_rm(path + i)) {
                        struct buffer_head *bh;
                        /* go to the next level */
                        ext_debug("move to level %d (block %llu)\n",
                                  i + 1, ext4_idx_pblock(path[i].p_idx));
                        memset(path + i + 1, 0, sizeof(*path));
                        bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
                        if (!bh) {
                                /* should we reset i_size? */
                                err = -EIO;
                                break;
                        }
                        if (WARN_ON(i + 1 > depth)) {
                                err = -EIO;
                                break;
                        }
                        if (ext4_ext_check(inode, ext_block_hdr(bh),
                                                        depth - i - 1)) {
                                err = -EIO;
                                break;
                        }
                        path[i + 1].p_bh = bh;

                        /* save actual number of indexes since this
                         * number is changed at the next iteration */
                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
                        i++;
                } else {
                        /* we finished processing this index, go up */
                        if (path[i].p_hdr->eh_entries == 0 && i > 0) {
                                /* index is empty, remove it;
                                 * handle must be already prepared by
                                 * the leaf removal above */
                                err = ext4_ext_rm_idx(handle, inode, path + i);
                        }
                        /* root level has p_bh == NULL, brelse() eats this */
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        i--;
                        ext_debug("return to level %d\n", i);
                }
        }

        /* TODO: flexible tree reduction should be here */
        if (path->p_hdr->eh_entries == 0) {
                /*
                 * truncate to zero freed all the tree,
                 * so we need to correct eh_depth
                 */
                err = ext4_ext_get_access(handle, inode, path);
                if (err == 0) {
                        ext_inode_hdr(inode)->eh_depth = 0;
                        ext_inode_hdr(inode)->eh_max =
                                cpu_to_le16(ext4_ext_space_root(inode, 0));
                        err = ext4_ext_dirty(handle, inode, path);
                }
        }
out:
        ext4_ext_drop_refs(path);
        kfree(path);
        ext4_journal_stop(handle);

        return err;
}
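
/*
 * Note on the walk above: path[] doubles as an explicit stack, with i
 * as the current depth.  p_idx is the cursor within an index block,
 * and p_block snapshots eh_entries before descending so that
 * ext4_ext_more_to_rm() can tell whether a deeper removal shrank the
 * current index and it must therefore be revisited.
 */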
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
        /*
         * possible initialization would be here
         */

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
                printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
                printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
                printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
                printk(", stats");
#endif
                printk("\n");
#endif
#ifdef EXTENTS_STATS
                spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
                EXT4_SB(sb)->s_ext_min = 1 << 30;
                EXT4_SB(sb)->s_ext_max = 0;
#endif
        }
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
                return;

#ifdef EXTENTS_STATS
        if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
                struct ext4_sb_info *sbi = EXT4_SB(sb);
                printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
                        sbi->s_ext_blocks, sbi->s_ext_extents,
                        sbi->s_ext_blocks / sbi->s_ext_extents);
                printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
                        sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
        }
#endif
}
/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
        ext4_fsblk_t ee_pblock;
        unsigned int ee_len;
        int ret;

        ee_len    = ext4_ext_get_actual_len(ex);
        ee_pblock = ext4_ext_pblock(ex);

        ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
        if (ret > 0)
                ret = 0;

        return ret;
}
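
/*
 * Note: GFP_NOFS is used above because the zeroout can be issued from
 * the write path, where recursing into filesystem reclaim would be
 * unsafe; any positive return from sb_issue_zeroout() is normalized
 * to 0 so callers can treat every non-zero result as an error.
 */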
/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */
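
/*
 * A typical combination: when splitting an uninitialized extent for
 * direct IO, the callers below pass EXT4_EXT_MARK_UNINIT2 so the tail
 * stays uninitialized, and add EXT4_EXT_MAY_ZEROOUT only when the
 * extent lies fully inside i_size, so that a split failing with
 * ENOSPC can fall back to zeroing the range instead of erroring out.
 */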
/*
 * ext4_split_extent_at() splits an extent at given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flag: indicates if the extent could be zeroed out if the split
 *              fails, and the states (init or uninit) of the new extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
 * states are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, and the extent is only marked.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path *path,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags)
{
        ext4_fsblk_t newblock;
        ext4_lblk_t ee_block;
        struct ext4_extent *ex, newex, orig_ex;
        struct ext4_extent *ex2 = NULL;
        unsigned int ee_len, depth;
        int err = 0;

        ext_debug("ext4_split_extents_at: inode %lu, logical"
                "block %llu\n", inode->i_ino, (unsigned long long)split);

        ext4_ext_show_leaf(inode, path);

        depth = ext_depth(inode);
        ex = path[depth].p_ext;
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
        newblock = split - ee_block + ext4_ext_pblock(ex);

        BUG_ON(split < ee_block || split >= (ee_block + ee_len));

        err = ext4_ext_get_access(handle, inode, path + depth);
        if (err)
                goto out;

        if (split == ee_block) {
                /*
                 * case b: block @split is the block that the extent begins with
                 * then we just change the state of the extent, and splitting
                 * is not needed.
                 */
                if (split_flag & EXT4_EXT_MARK_UNINIT2)
                        ext4_ext_mark_uninitialized(ex);
                else
                        ext4_ext_mark_initialized(ex);

                if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
                        ext4_ext_try_to_merge(inode, path, ex);

                err = ext4_ext_dirty(handle, inode, path + depth);
                goto out;
        }

        /* case a */
        memcpy(&orig_ex, ex, sizeof(orig_ex));
        ex->ee_len = cpu_to_le16(split - ee_block);
        if (split_flag & EXT4_EXT_MARK_UNINIT1)
                ext4_ext_mark_uninitialized(ex);

        /*
         * path may lead to new leaf, not to original leaf any more
         * after ext4_ext_insert_extent() returns,
         */
        err = ext4_ext_dirty(handle, inode, path + depth);
        if (err)
                goto fix_extent_len;

        ex2 = &newex;
        ex2->ee_block = cpu_to_le32(split);
        ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
        ext4_ext_store_pblock(ex2, newblock);
        if (split_flag & EXT4_EXT_MARK_UNINIT2)
                ext4_ext_mark_uninitialized(ex2);

        err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
        if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
                err = ext4_ext_zeroout(inode, &orig_ex);
                if (err)
                        goto fix_extent_len;
                /* update the extent length and mark as initialized */
                ex->ee_len = cpu_to_le16(ee_len);
                ext4_ext_try_to_merge(inode, path, ex);
                err = ext4_ext_dirty(handle, inode, path + depth);
                goto out;
        } else if (err)
                goto fix_extent_len;

out:
        ext4_ext_show_leaf(inode, path);
        return err;

fix_extent_len:
        ex->ee_len = orig_ex.ee_len;
        ext4_ext_dirty(handle, inode, path + depth);
        return err;
}
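
/*
 * Example (illustrative numbers): an extent with ee_block = 100,
 * length 20, pblock 500, split at block 110 becomes [100, 109] at
 * pblock 500 (ee_len rewritten to 10) plus a new extent with
 * ee_block = 110, length 10, and pblock = split - ee_block + 500 =
 * 510, which is then inserted with ext4_ext_insert_extent().
 */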
/*
 * ext4_split_extent() splits an extent and marks the extent which is
 * covered by @map as split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three).
 * There are three possibilities:
 *   a> There is no split required.
 *   b> Splits into two extents: the split happens at either end of the
 *      extent.
 *   c> Splits into three extents: someone is splitting in the middle of
 *      the extent.
 *
 */
static int ext4_split_extent(handle_t *handle,
                              struct inode *inode,
                              struct ext4_ext_path *path,
                              struct ext4_map_blocks *map,
                              int split_flag,
                              int flags)
{
        ext4_lblk_t ee_block;
        struct ext4_extent *ex;
        unsigned int ee_len, depth;
        int err = 0;
        int uninitialized;
        int split_flag1, flags1;

        depth = ext_depth(inode);
        ex = path[depth].p_ext;
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
        uninitialized = ext4_ext_is_uninitialized(ex);

        if (map->m_lblk + map->m_len < ee_block + ee_len) {
                split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
                              EXT4_EXT_MAY_ZEROOUT : 0;
                flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
                if (uninitialized)
                        split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
                                       EXT4_EXT_MARK_UNINIT2;
                err = ext4_split_extent_at(handle, inode, path,
                                map->m_lblk + map->m_len, split_flag1, flags1);
                if (err)
                        goto out;
        }

        ext4_ext_drop_refs(path);
        path = ext4_ext_find_extent(inode, map->m_lblk, path);
        if (IS_ERR(path))
                return PTR_ERR(path);

        if (map->m_lblk >= ee_block) {
                split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
                              EXT4_EXT_MAY_ZEROOUT : 0;
                if (uninitialized)
                        split_flag1 |= EXT4_EXT_MARK_UNINIT1;
                if (split_flag & EXT4_EXT_MARK_UNINIT2)
                        split_flag1 |= EXT4_EXT_MARK_UNINIT2;
                err = ext4_split_extent_at(handle, inode, path,
                                map->m_lblk, split_flag1, flags);
                if (err)
                        goto out;
        }

        ext4_ext_show_leaf(inode, path);
out:
        return err ? err : map->m_len;
}
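
/*
 * Ordering note: the upper boundary (m_lblk + m_len) is split first,
 * so when the path is re-looked-up for the lower split at m_lblk it
 * still lands in the extent containing the map; a successful return
 * value is map->m_len rather than 0.
 */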
#define EXT4_EXT_ZERO_LEN 7
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: the entire extent should be initialized
 *   b> Splits into two extents: the write happens at either end of the extent
 *   c> Splits into three extents: someone is writing in the middle of the
 *      extent
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
                                           struct inode *inode,
                                           struct ext4_map_blocks *map,
                                           struct ext4_ext_path *path)
{
        struct ext4_map_blocks split_map;
        struct ext4_extent zero_ex;
        struct ext4_extent *ex;
        ext4_lblk_t ee_block, eof_block;
        unsigned int allocated, ee_len, depth;
        int err = 0;
        int split_flag = 0;

        ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
                "block %llu, max_blocks %u\n", inode->i_ino,
                (unsigned long long)map->m_lblk, map->m_len);

        eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                inode->i_sb->s_blocksize_bits;
        if (eof_block < map->m_lblk + map->m_len)
                eof_block = map->m_lblk + map->m_len;

        depth = ext_depth(inode);
        ex = path[depth].p_ext;
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
        allocated = ee_len - (map->m_lblk - ee_block);

        WARN_ON(map->m_lblk < ee_block);
        /*
         * It is safe to convert extent to initialized via explicit
         * zeroout only if extent is fully inside i_size or new_size.
         */
        split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

        /* If extent has less than 2*EXT4_EXT_ZERO_LEN, zero out directly */
        if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
            (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
                err = ext4_ext_zeroout(inode, ex);
                if (err)
                        goto out;

                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto out;
                ext4_ext_mark_initialized(ex);
                ext4_ext_try_to_merge(inode, path, ex);
                err = ext4_ext_dirty(handle, inode, path + depth);
                goto out;
        }

        /*
         * four cases:
         * 1. split the extent into three extents.
         * 2. split the extent into two extents, zeroout the first half.
         * 3. split the extent into two extents, zeroout the second half.
         * 4. split the extent into two extents without zeroout.
         */
        split_map.m_lblk = map->m_lblk;
        split_map.m_len = map->m_len;

        if (allocated > map->m_len) {
                if (allocated <= EXT4_EXT_ZERO_LEN &&
                    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
                        /* case 3 */
                        zero_ex.ee_block =
                                         cpu_to_le32(map->m_lblk);
                        zero_ex.ee_len = cpu_to_le16(allocated);
                        ext4_ext_store_pblock(&zero_ex,
                                ext4_ext_pblock(ex) + map->m_lblk - ee_block);
                        err = ext4_ext_zeroout(inode, &zero_ex);
                        if (err)
                                goto out;
                        split_map.m_lblk = map->m_lblk;
                        split_map.m_len = allocated;
                } else if ((map->m_lblk - ee_block + map->m_len <
                           EXT4_EXT_ZERO_LEN) &&
                           (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
                        /* case 2 */
                        if (map->m_lblk != ee_block) {
                                zero_ex.ee_block = ex->ee_block;
                                zero_ex.ee_len = cpu_to_le16(map->m_lblk -
                                                        ee_block);
                                ext4_ext_store_pblock(&zero_ex,
                                                      ext4_ext_pblock(ex));
                                err = ext4_ext_zeroout(inode, &zero_ex);
                                if (err)
                                        goto out;
                        }

                        split_map.m_lblk = ee_block;
                        split_map.m_len = map->m_lblk - ee_block + map->m_len;
                        allocated = map->m_len;
                }
        }

        allocated = ext4_split_extent(handle, inode, path,
                                      &split_map, split_flag, 0);
        if (allocated < 0)
                err = allocated;

out:
        return err ? err : allocated;
}
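
/*
 * With EXT4_EXT_ZERO_LEN of 7, e.g. a 14-block uninitialized extent
 * (<= 2 * 7) lying fully inside i_size is zeroed wholesale above
 * rather than split, trading a small amount of zero writing for one
 * fewer extent in the tree (illustrative numbers).
 */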
/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO is used to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the
 * uninitialized extent into multiple initialized/uninitialized extents
 * (up to three).
 * There are three possibilities:
 *   a> There is no split required: the entire extent stays uninitialized
 *   b> Splits into two extents: the write happens at either end of the extent
 *   c> Splits into three extents: someone is writing in the middle of the
 *      extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC from occurring at
 * IO completion, we split the uninitialized extent before DIO submits the
 * IO. The uninitialized extent handled here is split into (at most) three
 * uninitialized extents. After IO completes, the part that was filled is
 * converted to initialized by the end_io callback function via
 * ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
static int ext4_split_unwritten_extents(handle_t *handle,
                                        struct inode *inode,
                                        struct ext4_map_blocks *map,
                                        struct ext4_ext_path *path,
                                        int flags)
{
        ext4_lblk_t eof_block;
        ext4_lblk_t ee_block;
        struct ext4_extent *ex;
        unsigned int ee_len;
        int split_flag = 0, depth;

        ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
                "block %llu, max_blocks %u\n", inode->i_ino,
                (unsigned long long)map->m_lblk, map->m_len);

        eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                inode->i_sb->s_blocksize_bits;
        if (eof_block < map->m_lblk + map->m_len)
                eof_block = map->m_lblk + map->m_len;
        /*
         * It is safe to convert extent to initialized via explicit
         * zeroout only if extent is fully inside i_size or new_size.
         */
        depth = ext_depth(inode);
        ex = path[depth].p_ext;
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);

        split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
        split_flag |= EXT4_EXT_MARK_UNINIT2;

        flags |= EXT4_GET_BLOCKS_PRE_IO;
        return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
                                              struct inode *inode,
                                              struct ext4_ext_path *path)
{
        struct ext4_extent *ex;
        int depth;
        int err = 0;

        depth = ext_depth(inode);
        ex = path[depth].p_ext;

        ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
                "block %llu, max_blocks %u\n", inode->i_ino,
                (unsigned long long)le32_to_cpu(ex->ee_block),
                ext4_ext_get_actual_len(ex));

        err = ext4_ext_get_access(handle, inode, path + depth);
        if (err)
                goto out;
        /* first mark the extent as initialized */
        ext4_ext_mark_initialized(ex);

        /* note: ext4_ext_correct_indexes() isn't needed here because
         * borders are not changed
         */
        ext4_ext_try_to_merge(inode, path, ex);

        /* Mark modified extent as dirty */
        err = ext4_ext_dirty(handle, inode, path + depth);
out:
        ext4_ext_show_leaf(inode, path);
        return err;
}
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
                        sector_t block, int count)
{
        int i;
        for (i = 0; i < count; i++)
                unmap_underlying_metadata(bdev, block + i);
}
/*
 * Handle EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
                              ext4_lblk_t lblk,
                              struct ext4_ext_path *path,
                              unsigned int len)
{
        int i, depth;
        struct ext4_extent_header *eh;
        struct ext4_extent *last_ex;

        if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
                return 0;

        depth = ext_depth(inode);
        eh = path[depth].p_hdr;

        if (unlikely(!eh->eh_entries)) {
                EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
                                 "EOFBLOCKS_FL set");
                return -EIO;
        }
        last_ex = EXT_LAST_EXTENT(eh);
        /*
         * We should clear the EOFBLOCKS_FL flag if we are writing the
         * last block in the last extent in the file.  We test this by
         * first checking to see if the caller to
         * ext4_ext_get_blocks() was interested in the last block (or
         * a block beyond the last block) in the current extent.  If
         * this turns out to be false, we can bail out from this
         * function immediately.
         */
        if (lblk + len < le32_to_cpu(last_ex->ee_block) +
            ext4_ext_get_actual_len(last_ex))
                return 0;
        /*
         * If the caller does appear to be planning to write at or
         * beyond the end of the current extent, we then test to see
         * if the current extent is the last extent in the file, by
         * checking to make sure it was reached via the rightmost node
         * at each level of the tree.
         */
        for (i = depth-1; i >= 0; i--)
                if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
                        return 0;
        ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
        return ext4_mark_inode_dirty(handle, inode);
}
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                        struct ext4_map_blocks *map,
                        struct ext4_ext_path *path, int flags,
                        unsigned int allocated, ext4_fsblk_t newblock)
{
        int ret = 0;
        int err = 0;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

        ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
                  "block %llu, max_blocks %u, flags %d, allocated %u",
                  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
                  flags, allocated);
        ext4_ext_show_leaf(inode, path);

        /* get_block() before submit the IO, split the extent */
        if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
                ret = ext4_split_unwritten_extents(handle, inode, map,
                                                   path, flags);
                /*
                 * Flag the inode(non aio case) or end_io struct (aio case)
                 * that this IO needs conversion to written when IO is
                 * completed
                 */
                if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
                        io->flag = EXT4_IO_END_UNWRITTEN;
                        atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
                } else
                        ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
                if (ext4_should_dioread_nolock(inode))
                        map->m_flags |= EXT4_MAP_UNINIT;
                goto out;
        }
        /* IO end_io complete, convert the filled extent to written */
        if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
                ret = ext4_convert_unwritten_extents_endio(handle, inode,
                                                        path);
                if (ret >= 0) {
                        ext4_update_inode_fsync_trans(handle, inode, 1);
                        err = check_eofblocks_fl(handle, inode, map->m_lblk,
                                                 path, map->m_len);
                } else
                        err = ret;
                goto out2;
        }
        /* buffered IO case */
        /*
         * repeat fallocate creation request
         * we already have an unwritten extent
         */
        if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
                goto map_out;

        /* buffered READ or buffered write_begin() lookup */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
                /*
                 * We have blocks reserved already.  We
                 * return allocated blocks so that delalloc
                 * won't do block reservation for us.  But
                 * the buffer head will be unmapped so that
                 * a read from the block returns 0s.
                 */
                map->m_flags |= EXT4_MAP_UNWRITTEN;
                goto out1;
        }

        /* buffered write, writepage time, convert */
        ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
        if (ret >= 0) {
                ext4_update_inode_fsync_trans(handle, inode, 1);
                err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
                                         map->m_len);
                if (err < 0)
                        goto out2;
        }

out:
        if (ret <= 0) {
                err = ret;
                goto out2;
        } else
                allocated = ret;
        map->m_flags |= EXT4_MAP_NEW;
        /*
         * if we allocated more blocks than requested
         * we need to make sure we unmap the extra block
         * allocated. The actual needed block will get
         * unmapped later when we find the buffer_head marked
         * new.
         */
        if (allocated > map->m_len) {
                unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
                                        newblock + map->m_len,
                                        allocated - map->m_len);
                allocated = map->m_len;
        }

        /*
         * If we have done fallocate with the offset that is already
         * delayed allocated, we would have block reservation
         * and quota reservation done in the delayed write path.
         * But fallocate would have already updated quota and block
         * count for this offset. So cancel these reservations.
         */
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                ext4_da_update_reserve_space(inode, allocated, 0);

map_out:
        map->m_flags |= EXT4_MAP_MAPPED;
out1:
        if (allocated > map->m_len)
                allocated = map->m_len;
        ext4_ext_show_leaf(inode, path);
        map->m_pblk = newblock;
        map->m_len = allocated;
out2:
        if (path) {
                ext4_ext_drop_refs(path);
                kfree(path);
        }
        return err ? err : allocated;
}
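
/*
 * Summary of the dispatch above: EXT4_GET_BLOCKS_PRE_IO splits the
 * extent before the IO is submitted, EXT4_GET_BLOCKS_CONVERT marks it
 * written after end_io, a repeated fallocate request maps it as-is,
 * and a plain lookup returns it unmapped but flagged
 * EXT4_MAP_UNWRITTEN so reads see zeroes.
 */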
/*
 * Block allocation/map/preallocation routine for extents based files
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *              buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        struct ext4_map_blocks *map, int flags)
{
        struct ext4_ext_path *path = NULL;
        struct ext4_extent newex, *ex;
        ext4_fsblk_t newblock = 0;
        int err = 0, depth, ret;
        unsigned int allocated = 0;
        unsigned int punched_out = 0;
        unsigned int result = 0;
        struct ext4_allocation_request ar;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
        struct ext4_map_blocks punch_map;

        ext_debug("blocks %u/%u requested for inode %lu\n",
                  map->m_lblk, map->m_len, inode->i_ino);
        trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);

        /* check in cache */
        if (ext4_ext_in_cache(inode, map->m_lblk, &newex) &&
                ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) {
                if (!newex.ee_start_lo && !newex.ee_start_hi) {
                        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
                                /*
                                 * block isn't allocated yet and
                                 * user doesn't want to allocate it
                                 */
                                goto out2;
                        }
                        /* we should allocate requested block */
                } else {
                        /* block is already allocated */
                        newblock = map->m_lblk
                                   - le32_to_cpu(newex.ee_block)
                                   + ext4_ext_pblock(&newex);
                        /* number of remaining blocks in the extent */
                        allocated = ext4_ext_get_actual_len(&newex) -
                                (map->m_lblk - le32_to_cpu(newex.ee_block));
                        goto out;
                }
        }

        /* find extent for this block */
        path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
        if (IS_ERR(path)) {
                err = PTR_ERR(path);
                path = NULL;
                goto out2;
        }

        depth = ext_depth(inode);

        /*
         * consistent leaf must not be empty;
         * this situation is possible, though, _during_ tree modification;
         * this is why assert can't be put in ext4_ext_find_extent()
         */
        if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
                EXT4_ERROR_INODE(inode, "bad extent address "
                                 "lblock: %lu, depth: %d pblock %lld",
                                 (unsigned long) map->m_lblk, depth,
                                 path[depth].p_block);
                err = -EIO;
                goto out2;
        }

        ex = path[depth].p_ext;
        if (ex) {
                ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
                ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
                unsigned short ee_len;

                /*
                 * Uninitialized extents are treated as holes, except that
                 * we split out initialized portions during a write.
                 */
                ee_len = ext4_ext_get_actual_len(ex);
                /* if found extent covers block, simply return it */
                if (in_range(map->m_lblk, ee_block, ee_len)) {
                        newblock = map->m_lblk - ee_block + ee_start;
                        /* number of remaining blocks in the extent */
                        allocated = ee_len - (map->m_lblk - ee_block);
                        ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
                                  ee_block, ee_len, newblock);

                        if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
                                /*
                                 * Do not put uninitialized extent
                                 * in the cache
                                 */
                                if (!ext4_ext_is_uninitialized(ex)) {
                                        ext4_ext_put_in_cache(inode, ee_block,
                                                ee_len, ee_start);
                                        goto out;
                                }
                                ret = ext4_ext_handle_uninitialized_extents(
                                        handle, inode, map, path, flags,
                                        allocated, newblock);
                                return ret;
                        }

                        /*
                         * Punch out the map length, but only to the
                         * end of the extent
                         */
                        punched_out = allocated < map->m_len ?
                                allocated : map->m_len;

                        /*
                         * Since extents need to be converted to
                         * uninitialized, they must fit in an
                         * uninitialized extent
                         */
                        if (punched_out > EXT_UNINIT_MAX_LEN)
                                punched_out = EXT_UNINIT_MAX_LEN;

                        punch_map.m_lblk = map->m_lblk;
                        punch_map.m_pblk = newblock;
                        punch_map.m_len = punched_out;
                        punch_map.m_flags = 0;

                        /* Check to see if the extent needs to be split */
                        if (punch_map.m_len != ee_len ||
                                punch_map.m_lblk != ee_block) {

                                ret = ext4_split_extent(handle, inode,
                                        path, &punch_map, 0,
                                        EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
                                        EXT4_GET_BLOCKS_PRE_IO);

                                if (ret < 0) {
                                        err = ret;
                                        goto out2;
                                }
                                /*
                                 * find extent for the block at
                                 * the start of the hole
                                 */
                                ext4_ext_drop_refs(path);
                                kfree(path);

                                path = ext4_ext_find_extent(inode,
                                        map->m_lblk, NULL);
                                if (IS_ERR(path)) {
                                        err = PTR_ERR(path);
                                        path = NULL;
                                        goto out2;
                                }

                                depth = ext_depth(inode);
                                ex = path[depth].p_ext;
                                ee_len = ext4_ext_get_actual_len(ex);
                                ee_block = le32_to_cpu(ex->ee_block);
                                ee_start = ext4_ext_pblock(ex);

                        }

                        ext4_ext_mark_uninitialized(ex);

                        err = ext4_ext_remove_space(inode, map->m_lblk,
                                map->m_lblk + punched_out);

                        goto out2;
                }
        }

        /*
         * requested block isn't allocated yet;
         * we can't create blocks if the create flag is zero
         */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
                /*
                 * put just found gap into cache to speed up
                 * subsequent requests
                 */
                ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
                goto out2;
        }

        /*
         * Okay, we need to do block allocation.
         */

        /* find neighbour allocated blocks */
        ar.lleft = map->m_lblk;
        err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (err)
                goto out2;
        ar.lright = map->m_lblk;
        err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (err)
                goto out2;

        /*
         * See if request is beyond maximum number of blocks we can have in
         * a single extent. For an initialized extent this limit is
         * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
         * EXT_UNINIT_MAX_LEN.
         */
        if (map->m_len > EXT_INIT_MAX_LEN &&
            !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
                map->m_len = EXT_INIT_MAX_LEN;
        else if (map->m_len > EXT_UNINIT_MAX_LEN &&
                 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
                map->m_len = EXT_UNINIT_MAX_LEN;

        /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
        newex.ee_block = cpu_to_le32(map->m_lblk);
        newex.ee_len = cpu_to_le16(map->m_len);
        err = ext4_ext_check_overlap(inode, &newex, path);
        if (err)
                allocated = ext4_ext_get_actual_len(&newex);
        else
                allocated = map->m_len;

        /* allocate new block */
        ar.inode = inode;
        ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
        ar.logical = map->m_lblk;
        ar.len = allocated;
        if (S_ISREG(inode->i_mode))
                ar.flags = EXT4_MB_HINT_DATA;
        else
                /* disable in-core preallocation for non-regular files */
                ar.flags = 0;
        if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
                ar.flags |= EXT4_MB_HINT_NOPREALLOC;
        newblock = ext4_mb_new_blocks(handle, &ar, &err);
        if (!newblock)
                goto out2;
        ext_debug("allocate new block: goal %llu, found %llu/%u\n",
                  ar.goal, newblock, allocated);

        /* try to insert new extent into found leaf and return */
        ext4_ext_store_pblock(&newex, newblock);
        newex.ee_len = cpu_to_le16(ar.len);
        /* Mark uninitialized */
        if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
                ext4_ext_mark_uninitialized(&newex);
                /*
                 * io_end structure was created for every IO write to an
                 * uninitialized extent. To avoid unnecessary conversion,
                 * here we flag the IO that really needs the conversion.
                 * For non async direct IO case, flag the inode state
                 * that we need to perform conversion when IO is done.
                 */
                if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
                        if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
                                io->flag = EXT4_IO_END_UNWRITTEN;
                                atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
                        } else
                                ext4_set_inode_state(inode,
                                                     EXT4_STATE_DIO_UNWRITTEN);
                }
                if (ext4_should_dioread_nolock(inode))
                        map->m_flags |= EXT4_MAP_UNINIT;
        }

        err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
        if (!err)
                err = ext4_ext_insert_extent(handle, inode, path,
                                             &newex, flags);
        if (err) {
                int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
                        EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it every free() */
                ext4_discard_preallocations(inode);
                ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
                                 ext4_ext_get_actual_len(&newex), fb_flags);
                goto out2;
        }

        /* previous routine could use block we allocated */
        newblock = ext4_ext_pblock(&newex);
        allocated = ext4_ext_get_actual_len(&newex);
        if (allocated > map->m_len)
                allocated = map->m_len;
        map->m_flags |= EXT4_MAP_NEW;

        /*
         * Update reserved blocks/metadata blocks after successful
         * block allocation which had been deferred till now.
         */
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                ext4_da_update_reserve_space(inode, allocated, 1);

        /*
         * Cache the extent and update transaction to commit on fdatasync only
         * when it is _not_ an uninitialized extent.
         */
        if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
                ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
                ext4_update_inode_fsync_trans(handle, inode, 1);
        } else
                ext4_update_inode_fsync_trans(handle, inode, 0);
out:
        if (allocated > map->m_len)
                allocated = map->m_len;
        ext4_ext_show_leaf(inode, path);
        map->m_flags |= EXT4_MAP_MAPPED;
        map->m_pblk = newblock;
        map->m_len = allocated;
out2:
        if (path) {
                ext4_ext_drop_refs(path);
                kfree(path);
        }
        trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
                newblock, map->m_len, err ? err : allocated);

        result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
                        punched_out : allocated;

        return err ? err : result;
}
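
/*
 * Example of the cache-hit arithmetic above (illustrative numbers):
 * for a cached extent with ee_block = 100, len = 8, pblock = 5000, a
 * lookup of m_lblk = 103 returns newblock = 103 - 100 + 5000 = 5003
 * with allocated = 8 - 3 = 5 remaining blocks in the extent.
 */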
void ext4_ext_truncate(struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t last_block;
        handle_t *handle;
        int err = 0;

        /*
         * finish any pending end_io work so we won't run the risk of
         * converting any truncated blocks to initialized later
         */
        ext4_flush_completed_IO(inode);

        /*
         * probably first extent we're gonna free will be last in block
         */
        err = ext4_writepage_trans_blocks(inode);
        handle = ext4_journal_start(inode, err);
        if (IS_ERR(handle))
                return;

        if (inode->i_size & (sb->s_blocksize - 1))
                ext4_block_truncate_page(handle, mapping, inode->i_size);

        if (ext4_orphan_add(handle, inode))
                goto out_stop;

        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_ext_invalidate_cache(inode);

        ext4_discard_preallocations(inode);

        /*
         * TODO: optimization is possible here.
         * Probably we need not scan at all,
         * because page truncation is enough.
         */

        /* we have to know where to truncate from in crash case */
        EXT4_I(inode)->i_disksize = inode->i_size;
        ext4_mark_inode_dirty(handle, inode);

        last_block = (inode->i_size + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
        err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);

        /* In a multi-transaction truncate, we only make the final
         * transaction synchronous.
         */
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);

        up_write(&EXT4_I(inode)->i_data_sem);

out_stop:
        /*
         * If this was a simple ftruncate() and the file will remain alive,
         * then we need to clear up the orphan record which we created above.
         * However, if this was a real unlink then we were called by
         * ext4_delete_inode(), and we allow that function to clean up the
         * orphan info for us.
         */
        if (inode->i_nlink)
                ext4_orphan_del(handle, inode);

        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
        ext4_journal_stop(handle);
}
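
/*
 * last_block above is i_size rounded up to a block boundary; e.g.
 * with a 4096-byte block size and i_size = 10000, last_block = 3 and
 * blocks 3..EXT_MAX_BLOCKS-1 are removed, while blocks 0-2, which
 * still hold data, survive (illustrative numbers).
 */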
static void ext4_falloc_update_inode(struct inode *inode,
                                int mode, loff_t new_size, int update_ctime)
{
        struct timespec now;

        if (update_ctime) {
                now = current_fs_time(inode->i_sb);
                if (!timespec_equal(&inode->i_ctime, &now))
                        inode->i_ctime = now;
        }
        /*
         * Update only when preallocation was requested beyond
         * the file size.
         */
        if (!(mode & FALLOC_FL_KEEP_SIZE)) {
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (new_size > EXT4_I(inode)->i_disksize)
                        ext4_update_i_disksize(inode, new_size);
        } else {
                /*
                 * Mark that we allocate beyond EOF so the subsequent truncate
                 * can proceed even if the new size is the same as i_size.
                 */
                if (new_size > i_size_read(inode))
                        ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
        }

}
/*
 * preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        handle_t *handle;
        loff_t new_size;
        unsigned int max_blocks;
        int ret = 0;
        int ret2 = 0;
        int retries = 0;
        struct ext4_map_blocks map;
        unsigned int credits, blkbits = inode->i_blkbits;

        /*
         * currently supporting (pre)allocate mode for extent-based
         * files _only_
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EOPNOTSUPP;

        /* Return error if mode is not supported */
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                return ext4_punch_hole(file, offset, len);

        trace_ext4_fallocate_enter(inode, offset, len, mode);
        map.m_lblk = offset >> blkbits;
        /*
         * We can't just convert len to max_blocks because
         * If blocksize = 4096 offset = 3072 and len = 2048
         */
        max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
                - map.m_lblk;
        /*
         * credits to insert 1 extent into extent tree
         */
        credits = ext4_chunk_trans_blocks(inode, max_blocks);
        mutex_lock(&inode->i_mutex);
        ret = inode_newsize_ok(inode, (len + offset));
        if (ret) {
                mutex_unlock(&inode->i_mutex);
                trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
                return ret;
        }
retry:
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk = map.m_lblk + ret;
                map.m_len = max_blocks = max_blocks - ret;
                handle = ext4_journal_start(inode, credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        break;
                }
                ret = ext4_map_blocks(handle, inode, &map,
                                      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
                                      EXT4_GET_BLOCKS_NO_NORMALIZE);
                if (ret <= 0) {
#ifdef EXT4FS_DEBUG
                        WARN_ON(ret <= 0);
                        printk(KERN_ERR "%s: ext4_ext_map_blocks "
                                    "returned error inode#%lu, block=%u, "
                                    "max_blocks=%u", __func__,
                                    inode->i_ino, map.m_lblk, max_blocks);
#endif
                        ext4_mark_inode_dirty(handle, inode);
                        ret2 = ext4_journal_stop(handle);
                        break;
                }
                if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
                                                blkbits) >> blkbits))
                        new_size = offset + len;
                else
                        new_size = (map.m_lblk + ret) << blkbits;

                ext4_falloc_update_inode(inode, mode, new_size,
                                         (map.m_flags & EXT4_MAP_NEW));
                ext4_mark_inode_dirty(handle, inode);
                ret2 = ext4_journal_stop(handle);
                if (ret2)
                        break;
        }
        if (ret == -ENOSPC &&
                        ext4_should_retry_alloc(inode->i_sb, &retries)) {
                ret = 0;
                goto retry;
        }
        mutex_unlock(&inode->i_mutex);
        trace_ext4_fallocate_exit(inode, offset, max_blocks,
                                ret > 0 ? ret2 : ret);
        return ret > 0 ? ret2 : ret;
}
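
/*
 * Worked instance of the max_blocks comment above (illustrative
 * numbers): with blocksize 4096, offset = 3072 and len = 2048 the
 * request touches blocks 0 and 1, so m_lblk = 0 and max_blocks =
 * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2, whereas len >> blkbits alone
 * would give 0.
 */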
/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io call back
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
                                    ssize_t len)
{
        handle_t *handle;
        unsigned int max_blocks;
        int ret = 0;
        int ret2 = 0;
        struct ext4_map_blocks map;
        unsigned int credits, blkbits = inode->i_blkbits;

        map.m_lblk = offset >> blkbits;
        /*
         * We can't just convert len to max_blocks because
         * If blocksize = 4096 offset = 3072 and len = 2048
         */
        max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
                      map.m_lblk);
        /*
         * credits to insert 1 extent into extent tree
         */
        credits = ext4_chunk_trans_blocks(inode, max_blocks);
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk += ret;
                map.m_len = (max_blocks -= ret);
                handle = ext4_journal_start(inode, credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        break;
                }
                ret = ext4_map_blocks(handle, inode, &map,
                                      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
                if (ret <= 0) {
                        WARN_ON(ret <= 0);
                        printk(KERN_ERR "%s: ext4_ext_map_blocks "
                                    "returned error inode#%lu, block=%u, "
                                    "max_blocks=%u", __func__,
                                    inode->i_ino, map.m_lblk, map.m_len);
                }
                ext4_mark_inode_dirty(handle, inode);
                ret2 = ext4_journal_stop(handle);
                if (ret <= 0 || ret2)
                        break;
        }
        return ret > 0 ? ret2 : ret;
}
/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
                       struct ext4_ext_cache *newex, struct ext4_extent *ex,
                       void *data)
{
        __u64   logical;
        __u64   physical;
        __u64   length;
        __u32   flags = 0;
        int     ret = 0;
        struct fiemap_extent_info *fieinfo = data;
        unsigned char blksize_bits;

        blksize_bits = inode->i_sb->s_blocksize_bits;
        logical = (__u64)newex->ec_block << blksize_bits;

        if (newex->ec_start == 0) {
                /*
                 * No extent in extent-tree contains block @newex->ec_start,
                 * then the block may stay in 1)a hole or 2)delayed-extent.
                 *
                 * Holes or delayed-extents are processed as follows.
                 * 1. lookup dirty pages with specified range in pagecache.
                 *    If no page is got, then there is no delayed-extent and
                 *    return with EXT_CONTINUE.
                 * 2. find the 1st mapped buffer,
                 * 3. check if the mapped buffer is both in the request range
                 *    and a delayed buffer. If not, there is no delayed-extent,
                 *    then return.
                 * 4. a delayed-extent is found, the extent will be collected.
                 */
                ext4_lblk_t     end = 0;
                pgoff_t         last_offset;
                pgoff_t         offset;
                pgoff_t         index;
                pgoff_t         start_index = 0;
                struct page     **pages = NULL;
                struct buffer_head *bh = NULL;
                struct buffer_head *head = NULL;
                unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);

                pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (pages == NULL)
                        return -ENOMEM;

                offset = logical >> PAGE_SHIFT;
repeat:
                last_offset = offset;
                head = NULL;
                ret = find_get_pages_tag(inode->i_mapping, &offset,
                                        PAGECACHE_TAG_DIRTY, nr_pages, pages);

                if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
                        /* First time, try to find a mapped buffer. */
                        if (ret == 0) {
out:
                                for (index = 0; index < ret; index++)
                                        page_cache_release(pages[index]);
                                /* just a hole. */
                                kfree(pages);
                                return EXT_CONTINUE;
                        }
                        index = 0;

next_page:
                        /* Try to find the 1st mapped buffer. */
                        end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
                                  blksize_bits;
                        if (!page_has_buffers(pages[index]))
                                goto out;
                        head = page_buffers(pages[index]);
                        if (!head)
                                goto out;

                        index++;
                        bh = head;
                        do {
                                if (end >= newex->ec_block +
                                        newex->ec_len)
                                        /* The buffer is out of
                                         * the request range.
                                         */
                                        goto out;

                                if (buffer_mapped(bh) &&
                                    end >= newex->ec_block) {
                                        start_index = index - 1;
                                        /* get the 1st mapped buffer. */
                                        goto found_mapped_buffer;
                                }

                                bh = bh->b_this_page;
                                end++;
                        } while (bh != head);

                        /* No mapped buffer in the range found in this page,
                         * We need to look up next page.
                         */
                        if (index >= ret) {
                                /* There is no page left, but we need to limit
                                 * newex->ec_len.
                                 */
                                newex->ec_len = end - newex->ec_block;
                                goto out;
                        }
                        goto next_page;
                } else {
                        /* Find contiguous delayed buffers. */
                        if (ret > 0 && pages[0]->index == last_offset)
                                head = page_buffers(pages[0]);
                        bh = head;
                        index = 1;
                        start_index = 0;
                }

found_mapped_buffer:
                if (bh != NULL && buffer_delay(bh)) {
                        /* 1st or contiguous delayed buffer found. */
                        if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
                                /*
                                 * 1st delayed buffer found, record
                                 * the start of extent.
                                 */
                                flags |= FIEMAP_EXTENT_DELALLOC;
                                newex->ec_block = end;
                                logical = (__u64)end << blksize_bits;
                        }
                        /* Find contiguous delayed buffers. */
                        do {
                                if (!buffer_delay(bh))
                                        goto found_delayed_extent;
                                bh = bh->b_this_page;
                                end++;
                        } while (bh != head);

                        for (; index < ret; index++) {
                                if (!page_has_buffers(pages[index])) {
                                        bh = NULL;
                                        break;
                                }
                                head = page_buffers(pages[index]);
                                if (!head) {
                                        bh = NULL;
                                        break;
                                }

                                if (pages[index]->index !=
                                    pages[start_index]->index + index
                                    - start_index) {
                                        /* Blocks are not contiguous. */
                                        bh = NULL;
                                        break;
                                }
                                bh = head;
                                do {
                                        if (!buffer_delay(bh))
                                                /* Delayed-extent ends. */
                                                goto found_delayed_extent;
                                        bh = bh->b_this_page;
                                        end++;
                                } while (bh != head);
                        }
                } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
                        /* a hole found. */
                        goto out;

found_delayed_extent:
                newex->ec_len = min(end - newex->ec_block,
                                                (ext4_lblk_t)EXT_INIT_MAX_LEN);
                if (ret == nr_pages && bh != NULL &&
                        newex->ec_len < EXT_INIT_MAX_LEN &&
                        buffer_delay(bh)) {
                        /* Have not collected an extent and continue. */
                        for (index = 0; index < ret; index++)
                                page_cache_release(pages[index]);
                        goto repeat;
                }

                for (index = 0; index < ret; index++)
                        page_cache_release(pages[index]);
                kfree(pages);
        }

        physical = (__u64)newex->ec_start << blksize_bits;
        length   = (__u64)newex->ec_len << blksize_bits;

        if (ex && ext4_ext_is_uninitialized(ex))
                flags |= FIEMAP_EXTENT_UNWRITTEN;

        if (next == EXT_MAX_BLOCKS)
                flags |= FIEMAP_EXTENT_LAST;

        ret = fiemap_fill_next_extent(fieinfo, logical, physical,
                                        length, flags);
        if (ret < 0)
                return ret;
        if (ret == 1)
                return EXT_BREAK;
        return EXT_CONTINUE;
}
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
                                struct fiemap_extent_info *fieinfo)
{
        __u64 physical = 0;
        __u64 length;
        __u32 flags = FIEMAP_EXTENT_LAST;
        int blockbits = inode->i_sb->s_blocksize_bits;
        int error = 0;

        /* in-inode? */
        if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
                struct ext4_iloc iloc;
                int offset;     /* offset of xattr in inode */

                error = ext4_get_inode_loc(inode, &iloc);
                if (error)
                        return error;
                physical = iloc.bh->b_blocknr << blockbits;
                offset = EXT4_GOOD_OLD_INODE_SIZE +
                                EXT4_I(inode)->i_extra_isize;
                physical += offset;
                length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
                flags |= FIEMAP_EXTENT_DATA_INLINE;
                brelse(iloc.bh);
        } else { /* external block */
                physical = EXT4_I(inode)->i_file_acl << blockbits;
                length = inode->i_sb->s_blocksize;
        }

        if (physical)
                error = fiemap_fill_next_extent(fieinfo, 0, physical,
                                                length, flags);
        return (error < 0 ? error : 0);
}
/*
 * ext4_ext_punch_hole
 *
 * Punches a hole of "length" bytes in a file starting
 * at byte "offset"
 *
 * @inode:  The inode of the file to punch a hole in
 * @offset: The starting byte offset of the hole
 * @length: The length of the hole
 *
 * Returns the number of blocks removed or negative on err
 */
int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct ext4_ext_cache cache_ex;
        ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
        struct address_space *mapping = inode->i_mapping;
        struct ext4_map_blocks map;
        handle_t *handle;
        loff_t first_block_offset, last_block_offset, block_len;
        loff_t first_page, last_page, first_page_offset, last_page_offset;
        int ret, credits, blocks_released, err = 0;

        first_block = (offset + sb->s_blocksize - 1) >>
                EXT4_BLOCK_SIZE_BITS(sb);
        last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

        first_block_offset = first_block << EXT4_BLOCK_SIZE_BITS(sb);
        last_block_offset = last_block << EXT4_BLOCK_SIZE_BITS(sb);

        first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        last_page = (offset + length) >> PAGE_CACHE_SHIFT;

        first_page_offset = first_page << PAGE_CACHE_SHIFT;
        last_page_offset = last_page << PAGE_CACHE_SHIFT;

        /*
         * Write out all dirty pages to avoid race conditions
         * Then release them.
         */
        if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                err = filemap_write_and_wait_range(mapping,
                        first_page_offset == 0 ? 0 : first_page_offset-1,
                        last_page_offset);
                if (err)
                        return err;
        }

        /* Now release the pages */
        if (last_page_offset > first_page_offset) {
                truncate_inode_pages_range(mapping, first_page_offset,
                                           last_page_offset-1);
        }

        /* finish any pending end_io work */
        ext4_flush_completed_IO(inode);

        credits = ext4_writepage_trans_blocks(inode);
        handle = ext4_journal_start(inode, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = ext4_orphan_add(handle, inode);
        if (err)
                goto out;

        /*
         * Now we need to zero out the non-block-aligned data.
         * If the file is smaller than a block, just
         * zero out the middle
         */
        if (first_block > last_block)
                ext4_block_zero_page_range(handle, mapping, offset, length);
        else {
                /* zero out the head of the hole before the first block */
                block_len = first_block_offset - offset;
                if (block_len > 0)
                        ext4_block_zero_page_range(handle, mapping,
                                                   offset, block_len);

                /* zero out the tail of the hole after the last block */
                block_len = offset + length - last_block_offset;
                if (block_len > 0) {
                        ext4_block_zero_page_range(handle, mapping,
                                        last_block_offset, block_len);
                }
        }

        /* If there are no blocks to remove, return now */
        if (first_block >= last_block)
                goto out;

        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_ext_invalidate_cache(inode);
        ext4_discard_preallocations(inode);

        /*
         * Loop over all the blocks and identify blocks
         * that need to be punched out
         */
        iblock = first_block;
        blocks_released = 0;
        while (iblock < last_block) {
                max_blocks = last_block - iblock;
                num_blocks = 1;
                memset(&map, 0, sizeof(map));
                map.m_lblk = iblock;
                map.m_len = max_blocks;
                ret = ext4_ext_map_blocks(handle, inode, &map,
                        EXT4_GET_BLOCKS_PUNCH_OUT_EXT);

                if (ret > 0) {
                        blocks_released += ret;
                        num_blocks = ret;
                } else if (ret == 0) {
                        /*
                         * If map blocks could not find the block,
                         * then it is in a hole.  If the hole was
                         * not already cached, then map blocks should
                         * put it in the cache.  So we can get the hole
                         * out of the cache
                         */
                        memset(&cache_ex, 0, sizeof(cache_ex));
                        if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
                                !cache_ex.ec_start) {

                                /* The hole is cached */
                                num_blocks = cache_ex.ec_block +
                                        cache_ex.ec_len - iblock;

                        } else {
                                /* The block could not be identified */
                                err = -EIO;
                                break;
                        }
                } else {
                        /* Map blocks error */
                        err = ret;
                        break;
                }

                if (num_blocks == 0) {
                        /* This condition should never happen */
                        ext_debug("Block lookup failed");
                        err = -EIO;
                        break;
                }

                iblock += num_blocks;
        }

        if (blocks_released > 0) {
                ext4_ext_invalidate_cache(inode);
                ext4_discard_preallocations(inode);
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);

        up_write(&EXT4_I(inode)->i_data_sem);

out:
        ext4_orphan_del(handle, inode);
        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
        ext4_journal_stop(handle);
        return err;
}
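
/*
 * Example of the alignment logic above (illustrative numbers): with a
 * 4096-byte block size, punching offset = 1000, length = 3000 gives
 * first_block = 1 and last_block = 0; first_block > last_block means
 * no whole block is covered, so only the byte range in the middle of
 * block 0 is zeroed and nothing is deallocated.
 */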
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
{
        ext4_lblk_t start_blk;
        int error = 0;

        /* fallback to generic here if not in extents fmt */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return generic_block_fiemap(inode, fieinfo, start, len,
                        ext4_get_block);

        if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
                return -EBADR;

        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
                error = ext4_xattr_fiemap(inode, fieinfo);
        } else {
                ext4_lblk_t len_blks;
                __u64 last_blk;

                start_blk = start >> inode->i_sb->s_blocksize_bits;
                last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
                if (last_blk >= EXT_MAX_BLOCKS)
                        last_blk = EXT_MAX_BLOCKS-1;
                len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

                /*
                 * Walk the extent tree gathering extent information.
                 * ext4_ext_fiemap_cb will push extents back to user.
                 */
                error = ext4_ext_walk_space(inode, start_blk, len_blks,
                                          ext4_ext_fiemap_cb, fieinfo);
        }

        return error;
}