/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>
/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
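/*
 * Illustrative note on how these flags combine (see
 * ext4_force_split_extent_at() below): when an unwritten extent is
 * split, EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2 is passed so
 * that both halves remain unwritten after the split.
 */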
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}
static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}
static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}
static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
		     struct inode *inode, struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like the index is empty;
		 * try to find the starting block from the index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
/*
 * Allocation for a metadata block.
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
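/*
 * Illustrative sizing, assuming a 4 KiB block size: an on-disk tree
 * node holds (4096 - 12) / 12 = 340 extents or indexes after the
 * 12-byte header, while the 60-byte i_data root holds
 * (60 - 12) / 12 = 4 entries.  The AGGRESSIVE_TEST caps above shrink
 * these limits so that deep trees can be exercised with small files.
 */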
static int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2 : 0,
			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL : 0));
}
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
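/*
 * Worked example (illustrative, assumes 4 KiB blocks, so
 * idxs = (4096 - 12) / 12 = 340): while extending a contiguous
 * delalloc run, most calls return 0; every 340th block charges one
 * extra index block, every 340^2-th a second-level index, and so on.
 * A non-contiguous block falls through to the worst case of
 * ext_depth(inode) + 1 new metadata blocks.
 */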
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
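/*
 * Illustrative note: the lblock + len check above rejects both a zero
 * length (lblock + 0 == lblock) and 32-bit wrap-around, e.g.
 * lblock = 0xfffffff0 with len = 0x20 would wrap past zero and
 * otherwise look like a plausible mapping.
 */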
static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
		ext4_fsblk_t pblock = 0;
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				pblock = ext4_ext_pblock(ext);
				es->s_last_error_block = cpu_to_le64(pblock);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			 "pblk %llu bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 (unsigned long long) pblk, error_msg,
			 le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}
#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = bh_submit_read(bh);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	if (!ext4_has_feature_journal(inode->i_sb) ||
	    (inode->i_ino !=
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
		err = __ext4_ext_check(function, line, inode,
				       ext_block_hdr(bh), depth, pblk);
		if (err)
			goto errout;
	}
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),	\
				 (depth), (flags))
/*
 * This function is called to cache a file's extent information in the
 * extent status tree.
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	/* Don't cache anything if there are no external extent blocks */
	if (depth == 0)
		goto out;
	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
				  le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx),
				  newblock);
			idx++;
		}
		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
			  m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}
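/*
 * Illustrative note on the search invariant (applies equally to
 * ext4_ext_binsearch() below): the search starts at FIRST + 1 because
 * the first entry covers everything below the second entry's start;
 * the loop maintains that every entry before l starts at or below
 * @block and every entry after r starts above it, so l - 1 ends up as
 * the rightmost entry whose start does not exceed @block.
 */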
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
			  m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_unwritten(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}
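/*
 * Illustrative note: after ext4_ext_tree_init() the inode body holds an
 * empty depth-0 root: eh_magic = EXT4_EXT_MAGIC, eh_entries = 0, and
 * eh_max = 4 (the 60-byte i_data area minus the 12-byte header, divided
 * by 12-byte entries), with no on-disk tree blocks allocated yet.
 */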
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			ret = -EFSCORRUPTED;
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;
	size_t ext_size = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
			inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
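/*
 * Illustrative walk-through of ext4_ext_split(): with depth = 2 and a
 * free index slot at level at = 1, depth - at = 1 block is allocated
 * (the new leaf); with at = 0, two blocks are allocated (a new leaf
 * plus one intermediate index block).  In every case the right-hand
 * halves of the split nodes move into the new blocks and a single new
 * index entry for @border is inserted at level @at.
 */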
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys.
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys.
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys.
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys.
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, block,
					    path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
				path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}
static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;
	if (ext4_ext_is_unwritten(ex1) &&
	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
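/*
 * Illustrative example: extents [lblk 100, len 50, pblk 1000] and
 * [lblk 150, len 30, pblk 1050] merge into [lblk 100, len 80,
 * pblk 1000], since both the logical and the physical ranges are
 * contiguous.  The combined length must also stay within
 * EXT_INIT_MAX_LEN (32768 blocks), because longer initialized extents
 * cannot be encoded in the low 15 bits of ee_len.
 */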
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0, unwritten;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		unwritten = ext4_ext_is_unwritten(ex);
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (unwritten)
			ext4_ext_mark_unwritten(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}
/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	path[1].p_maxdepth = path[0].p_maxdepth;
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}
/*
 * This function tries to merge the @ex extent to neighbours in the tree,
 * merging towards the right first, then towards the left, and finally
 * attempting to collapse a single-leaf tree into the inode.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 = EXT4_LBLK_CMASK(sbi, b2);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
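/*
 * Illustrative example: if newext covers logical blocks [100, 180)
 * (b1 = 100, len1 = 80) and the next allocated extent starts at
 * b2 = 150, the b1 + len1 > b2 check trims newext to
 * ee_len = b2 - b1 = 50, so the new mapping stops just short of the
 * existing one and the function returns 1.
 */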
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path **ppath,
				struct ext4_extent *newext, int gb_flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	int mb_flags = 0, unwritten;

	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EFSCORRUPTED;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}

	/* try to insert block into found extent and return */
	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {

		/*
		 * Try to see whether we should rather test the extent on
		 * right from ex, or from the left of ex. This is because
		 * ext4_find_extent() can return either extent on the
		 * left, or on the right from the searched position. This
		 * will make merging more effective.
		 */
		if (ex < EXT_LAST_EXTENT(eh) &&
		    (le32_to_cpu(ex->ee_block) +
		    ext4_ext_get_actual_len(ex) <
		    le32_to_cpu(newext->ee_block))) {
			ex += 1;
			goto prepend;
		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
			   (le32_to_cpu(newext->ee_block) +
			   ext4_ext_get_actual_len(newext) <
			   le32_to_cpu(ex->ee_block)))
			ex -= 1;

		/* Try to append newex to the ex */
		if (ext4_can_extents_be_merged(inode, ex, newext)) {
			ext_debug("append [%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;
			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}

prepend:
		/* Try to prepend newex to the ex */
		if (ext4_can_extents_be_merged(inode, newext, ex)) {
			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_block = newext->ee_block;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_find_extent(inode, next, NULL, 0);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		mb_flags |= EXT4_MB_USE_RESERVED;
	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
				       ppath, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_unwritten(newext),
				ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug("insert %u:%llu:[%d]%d before: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d after: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug("insert %u:%llu:[%d]%d: "
					"move %d extents from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	ext4_ext_drop_refs(npath);
	kfree(npath);
	return err;
}
static int ext4_fill_fiemap_extents(struct inode *inode,
				    ext4_lblk_t block, ext4_lblk_t num,
				    struct fiemap_extent_info *fieinfo)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	struct extent_status es;
	ext4_lblk_t next, next_del, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int exists, depth = 0, err = 0;
	unsigned int flags = 0;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;

	while (block < last && block != EXT_MAX_BLOCKS) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);

		path = ext4_find_extent(inode, block, &path, 0);
		if (IS_ERR(path)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EFSCORRUPTED;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		flags = 0;
		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			es.es_lblk = start;
			es.es_len = end - start;
			es.es_pblk = 0;
		} else {
			es.es_lblk = le32_to_cpu(ex->ee_block);
			es.es_len = ext4_ext_get_actual_len(ex);
			es.es_pblk = ext4_ext_pblock(ex);
			if (ext4_ext_is_unwritten(ex))
				flags |= FIEMAP_EXTENT_UNWRITTEN;
		}

		/*
		 * Find delayed extent and update es accordingly. We call
		 * it even in !exists case to find out whether es is the
		 * last existing extent or not.
		 */
		next_del = ext4_find_delayed_extent(inode, &es);
		if (!exists && next_del) {
			exists = 1;
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		}
		up_read(&EXT4_I(inode)->i_data_sem);

		if (unlikely(es.es_len == 0)) {
			EXT4_ERROR_INODE(inode, "es.es_len == 0");
			err = -EFSCORRUPTED;
			break;
		}

		/*
		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
		 * We need to check next == EXT_MAX_BLOCKS because an
		 * extent can carry both unwritten and delayed status at
		 * once, when a delayed-allocated range is also allocated
		 * by fallocate and the status tree tracks both in one
		 * extent.
		 *
		 * So we could return an unwritten and delayed extent, and
		 * its block is equal to 'next'.
		 */
		if (next == next_del && next == EXT_MAX_BLOCKS) {
			flags |= FIEMAP_EXTENT_LAST;
			if (unlikely(next_del != EXT_MAX_BLOCKS ||
				     next != EXT_MAX_BLOCKS)) {
				EXT4_ERROR_INODE(inode,
						 "next extent == %u, next "
						 "delalloc extent = %u",
						 next, next_del);
				err = -EFSCORRUPTED;
				break;
			}
		}

		if (exists) {
			err = fiemap_fill_next_extent(fieinfo,
				(__u64)es.es_lblk << blksize_bits,
				(__u64)es.es_pblk << blksize_bits,
				(__u64)es.es_len << blksize_bits,
				flags);
			if (err < 0)
				break;
			if (err == 1) {
				err = 0;
				break;
			}
		}

		block = es.es_lblk + es.es_len;
	}

	ext4_ext_drop_refs(path);
	kfree(path);
	return err;
}
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
			  ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	ext4_lblk_t len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;
	struct extent_status es;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCKS;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		BUG();
	}

	ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
	if (es.es_len) {
		/* There's delayed extent containing lblock? */
		if (es.es_lblk <= lblock)
			return;
		len = min(es.es_lblk - lblock, len);
	}
	ext_debug(" -> %u:%u\n", lblock, len);
	ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
}
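/*
 * Worked example for the gap caching above (editor's sketch, values
 * illustrative): if the leaf holds an extent [100:50] (logical block 100,
 * length 50) and a lookup for block 200 lands to its right with the next
 * allocated block at 300, the gap is lblock = 150, len = 150.  If the
 * extent status tree reports a delayed extent starting at 250, len is
 * trimmed to min(250 - 150, 150) = 100 before the hole [150:100] is cached.
 */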
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path, int depth)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	depth--;
	path = path + depth;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EFSCORRUPTED;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);
	}

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);

	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);

	while (--depth >= 0) {
		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
			break;
		path--;
		err = ext4_ext_get_access(handle, inode, path);
		if (err)
			break;
		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
		err = ext4_ext_dirty(handle, inode, path);
		if (err)
			break;
	}
	return err;
}
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When the actual path is passed in, the caller should calculate
 * credits under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
					    struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf, so no
			 *  need to account for leaf block credit.
			 *
			 *  Bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}
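/*
 * Editor's note on the arithmetic above (values illustrative): when the
 * leaf has a free slot, the estimate is 2 (block bitmap + group descriptor)
 * plus EXT4_META_TRANS_BLOCKS(sb), which budgets roughly for the remaining
 * metadata (inode, superblock, quota updates).  Without a path to inspect,
 * ext4_chunk_trans_blocks() sizes the worst case for an nrblocks-sized
 * chunk instead.
 */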
/*
 * How many index/leaf blocks need to change/allocate to add @extents extents?
 *
 * If we add a single extent, then in the worst case, each tree level
 * index/leaf need to be changed in case of the tree split.
 *
 * If more extents are inserted, they could cause the whole tree split more
 * than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
	int index;
	int depth;

	/* If we are converting the inline data, only one is needed here. */
	if (ext4_has_inline_data(inode))
		return 1;

	depth = ext_depth(inode);

	if (extents <= 1)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
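/*
 * Example (editor's sketch, matching the budgeting above): for a tree of
 * depth 2, a single extent insertion budgets index = 2 * 2 = 4 blocks
 * (one index/leaf per level on each side of a possible split); inserting
 * several extents budgets 2 * 3 = 6 to absorb a rare second whole-tree
 * split.
 */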
static inline int get_default_free_blocks_flags(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
	else if (ext4_should_journal_data(inode))
		return EXT4_FREE_BLOCKS_FORGET;
	return 0;
}
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      long long *partial_cluster,
			      ext4_lblk_t from, ext4_lblk_t to)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	ext4_fsblk_t pblk;
	int flags = get_default_free_blocks_flags(inode);

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we make a note
	 * that we tried freeing the cluster, and check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
	 */
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;

	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
	/*
	 * If we have a partial cluster, and it's different from the
	 * cluster of the last block, we need to explicitly free the
	 * partial cluster here.
	 */
	pblk = ext4_ext_pblock(ex) + ee_len - 1;
	if (*partial_cluster > 0 &&
	    *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, *partial_cluster),
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;
	}

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		long long first_cluster;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		pblk = ext4_ext_pblock(ex) + ee_len - num;
		/*
		 * Usually we want to free partial cluster at the end of the
		 * extent, except for the situation when the cluster is still
		 * used by any other extent (partial_cluster is negative).
		 */
		if (*partial_cluster < 0 &&
		    *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
			flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;

		ext_debug("free last %u blocks starting %llu partial %lld\n",
			  num, pblk, *partial_cluster);
		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
		/*
		 * If the block range to be freed didn't start at the
		 * beginning of a cluster, and we removed the entire
		 * extent and the cluster is not used by any other extent,
		 * save the partial cluster here, since we might need to
		 * delete if we determine that the truncate or punch hole
		 * operation has removed all of the blocks in the cluster.
		 * If that cluster is used by another extent, preserve its
		 * negative value so it isn't freed later on.
		 *
		 * If the whole extent wasn't freed, we've reached the
		 * start of the truncated/punched region and have finished
		 * removing blocks.  If there's a partial cluster here it's
		 * shared with the remainder of the extent and is no longer
		 * a candidate for removal.
		 */
		if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
			first_cluster = (long long) EXT4_B2C(sbi, pblk);
			if (first_cluster != -*partial_cluster)
				*partial_cluster = first_cluster;
		} else {
			*partial_cluster = 0;
		}
	} else
		ext4_error(sbi->s_sb, "strange request: removal(2) "
			   "%u-%u from %u:%u\n",
			   from, to, le32_to_cpu(ex->ee_block), ee_len);
	return 0;
}
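/*
 * Editor's sketch of the bigalloc block<->cluster arithmetic used above.
 * The macros below mirror the roles of EXT4_B2C()/EXT4_C2B()/
 * EXT4_PBLK_COFF() for a hypothetical cluster ratio of 16 blocks per
 * cluster (names and values are illustrative, not the kernel
 * definitions).  Kept under "#if 0" since it is user-space code.
 */
#if 0
#include <stdio.h>

#define CLUSTER_BITS	4		/* 16 blocks per cluster */
#define B2C(blk)	((blk) >> CLUSTER_BITS)
#define C2B(cluster)	((unsigned long long)(cluster) << CLUSTER_BITS)
#define PBLK_COFF(blk)	((blk) & ((1ULL << CLUSTER_BITS) - 1))

int main(void)
{
	unsigned long long pblk = 1000;

	/* block 1000 -> cluster 62, offset-in-cluster 8 */
	printf("block %llu -> cluster %llu, offset-in-cluster %llu\n",
	       pblk, B2C(pblk), PBLK_COFF(pblk));
	/* cluster 62 starts at block 992 */
	printf("cluster %llu starts at block %llu\n",
	       B2C(pblk), C2B(B2C(pblk)));
	return 0;
}
#endif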
/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end".  Both "start"
 * and "end" must appear in the same extent or EIO is returned.
 *
 * @handle: The journal handle
 * @inode:  The file's inode
 * @path:   The path to the leaf
 * @partial_cluster: The cluster which we'll have to free if all extents
 *                   have been released from it.  However, if this value is
 *                   negative, it's a cluster just to the right of the
 *                   punched region and it must not be freed.
 * @start:  The first block to remove
 * @end:    The last block to remove
 */
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path,
		 long long *partial_cluster,
		 ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b;
	int num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned unwritten = 0;
	struct ext4_extent *ex;
	ext4_fsblk_t pblk;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf to %u\n", start, end);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}
	/* find where to start removing */
	ex = path[depth].p_ext;
	if (!ex)
		ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_unwritten(ex))
			unwritten = 1;
		else
			unwritten = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  unwritten, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug("  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end < ex_ee_block) {
			/*
			 * We're going to skip this extent and move to another,
			 * so note that its first cluster is in use to avoid
			 * freeing it when removing blocks.  Eventually, the
			 * right edge of the truncated/punched region will
			 * be just to the left.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex);
				*partial_cluster =
					-(long long) EXT4_B2C(sbi, pblk);
			}
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			EXT4_ERROR_INODE(inode,
					 "can not handle truncate %u:%u "
					 "on extent %u:%u",
					 start, end, ex_ee_block,
					 ex_ee_block + ex_ee_len - 1);
			err = -EFSCORRUPTED;
			goto out;
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			num = a - ex_ee_block;
		} else {
			/* remove whole extent: excellent! */
			num = 0;
		}
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
					 a, b);
		if (err)
			goto out;

		if (num == 0)
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);

		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark unwritten if all the blocks in the
		 * extent have been removed.
		 */
		if (unwritten && num)
			ext4_ext_mark_unwritten(ex);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		}

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/*
	 * If there's a partial cluster and at least one extent remains in
	 * the leaf, free the partial cluster if it isn't shared with the
	 * current extent.  If it is shared with the current extent
	 * we zero partial_cluster because we've reached the start of the
	 * truncated/punched region and we're done removing blocks.
	 */
	if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
		pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
		if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
			ext4_free_blocks(handle, inode, NULL,
					 EXT4_C2B(sbi, *partial_cluster),
					 sbi->s_cluster_ratio,
					 get_default_free_blocks_flags(inode));
		}
		*partial_cluster = 0;
	}

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path, depth);

out:
	return err;
}
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
			  ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int depth = ext_depth(inode);
	struct ext4_ext_path *path = NULL;
	long long partial_cluster = 0;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %u to %u\n", start, end);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	trace_ext4_ext_remove_space(inode, start, end, depth);

	/*
	 * Check if we are removing extents inside the extent tree. If that
	 * is the case, we are going to punch a hole inside the extent tree
	 * so we have to check whether we need to split the extent covering
	 * the last block to remove so we can easily remove the part of it
	 * in ext4_ext_rm_leaf().
	 */
	if (end < EXT_MAX_BLOCKS - 1) {
		struct ext4_extent *ex;
		ext4_lblk_t ee_block, ex_end, lblk;
		ext4_fsblk_t pblk;

		/* find extent for or closest extent to this block */
		path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path)) {
			ext4_journal_stop(handle);
			return PTR_ERR(path);
		}
		depth = ext_depth(inode);
		/* Leaf may not exist only if inode has no blocks at all */
		ex = path[depth].p_ext;
		if (!ex) {
			if (depth) {
				EXT4_ERROR_INODE(inode,
						 "path[%d].p_hdr == NULL",
						 depth);
				err = -EFSCORRUPTED;
			}
			goto out;
		}

		ee_block = le32_to_cpu(ex->ee_block);
		ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;

		/*
		 * See if the last block is inside the extent, if so split
		 * the extent at 'end' block so we can easily remove the
		 * tail of the first part of the split extent in
		 * ext4_ext_rm_leaf().
		 */
		if (end >= ee_block && end < ex_end) {

			/*
			 * If we're going to split the extent, note that
			 * the cluster containing the block after 'end' is
			 * in use to avoid freeing it when removing blocks.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
				partial_cluster =
					-(long long) EXT4_B2C(sbi, pblk);
			}

			/*
			 * Split the extent in two so that 'end' is the last
			 * block in the first new extent. Also we should not
			 * fail removing space due to ENOSPC so try to use
			 * reserved block if that happens.
			 */
			err = ext4_force_split_extent_at(handle, inode, &path,
							 end + 1, 1);
			if (err < 0)
				goto out;

		} else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
			/*
			 * If there's an extent to the right its first cluster
			 * contains the immediate right boundary of the
			 * truncated/punched region.  Set partial_cluster to
			 * its negative value so it won't be freed if shared
			 * with the current extent.  The end < ee_block case
			 * is handled in ext4_ext_rm_leaf().
			 */
			lblk = ex_end + 1;
			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
						    &ex);
			if (err)
				goto out;
			if (pblk)
				partial_cluster =
					-(long long) EXT4_B2C(sbi, pblk);
		}
	}
	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	if (path) {
		int k = i = depth;

		while (--k > 0)
			path[k].p_block =
				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
	} else {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
			       GFP_NOFS);
		if (path == NULL) {
			ext4_journal_stop(handle);
			return -ENOMEM;
		}
		path[0].p_maxdepth = path[0].p_depth = depth;
		path[0].p_hdr = ext_inode_hdr(inode);
		i = 0;

		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
			err = -EFSCORRUPTED;
			goto out;
		}
	}
	err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial_cluster, start,
					       end);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = read_extent_tree_block(inode,
				ext4_idx_pblock(path[i].p_idx), depth - i - 1,
				EXT4_EX_NOCACHE);
			if (IS_ERR(bh)) {
				/* should we reset i_size? */
				err = PTR_ERR(bh);
				break;
			}
			/* Yield here to deal with large extent trees.
			 * Should be a no-op if we did IO above. */
			cond_resched();
			if (WARN_ON(i + 1 > depth)) {
				err = -EFSCORRUPTED;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * truncatei_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path, i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	trace_ext4_ext_remove_space_done(inode, start, end, depth,
			partial_cluster, path->p_hdr->eh_entries);

	/*
	 * If we still have something in the partial cluster and we have removed
	 * even the first extent, then we should free the blocks in the partial
	 * cluster as well.  (This code will only run when there are no leaves
	 * to the immediate left of the truncated/punched region.)
	 */
	if (partial_cluster > 0 && err == 0) {
		/* don't zero partial_cluster since it's not used afterwards */
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, partial_cluster),
				 sbi->s_cluster_ratio,
				 get_default_free_blocks_flags(inode));
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	path = NULL;
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}
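/*
 * Editor's note: the loop above is an iterative depth-first walk.  At each
 * level, path[i].p_idx scans the index entries right-to-left; when
 * ext4_ext_more_to_rm() reports that the current index still covers work,
 * the walk descends (i++), otherwise it removes an emptied index block and
 * climbs back up (i--).  The walk ends once the root level (i == 0) is
 * exhausted.
 */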
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (ext4_has_feature_extents(sb)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
		       ", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
		       ", check binsearch"
#endif
#ifdef EXTENTS_STATS
		       ", stats"
#endif
		       "\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!ext4_has_feature_extents(sb))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}
static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t  ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block  = le32_to_cpu(ex->ee_block);
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}
/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ext4_encrypted_inode(inode))
		return ext4_encrypted_zeroout(inode, ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}
/*
 * ext4_split_extent_at() splits an extent at given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flags: indicates if the extent could be zeroed out if split fails,
 *		 and the states (initialized or unwritten) of new extents.
 * @flags: flags used to insert new extent to extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
 * of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> split is not needed, and just mark the extent.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex, zero_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));

	ext_debug("ext4_split_extents_at: inode %lu, logical"
		"block %llu\n", inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
	BUG_ON(!ext4_ext_is_unwritten(ex) &&
	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
			     EXT4_EXT_MARK_UNWRIT1 |
			     EXT4_EXT_MARK_UNWRIT2));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with
		 * then we just change the state of the extent, and splitting
		 * is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
			ext4_ext_mark_unwritten(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(handle, inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		goto out;
	}

	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
		ext4_ext_mark_unwritten(ex);

	/*
	 * path may lead to new leaf, not to original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;

	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
		ext4_ext_mark_unwritten(ex2);

	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
			if (split_flag & EXT4_EXT_DATA_VALID1) {
				err = ext4_ext_zeroout(inode, ex2);
				zero_ex.ee_block = ex2->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex2));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex2));
			} else {
				err = ext4_ext_zeroout(inode, ex);
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
			}
		} else {
			err = ext4_ext_zeroout(inode, &orig_ex);
			zero_ex.ee_block = orig_ex.ee_block;
			zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(&orig_ex));
			ext4_ext_store_pblock(&zero_ex,
					      ext4_ext_pblock(&orig_ex));
		}

		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
		ext4_ext_try_to_merge(handle, inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		if (err)
			goto fix_extent_len;

		/* update extent status tree */
		err = ext4_zeroout_es(inode, &zero_ex);

		goto out;
	} else if (err)
		goto fix_extent_len;

out:
	ext4_ext_show_leaf(inode, path);
	return err;

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + path->p_depth);
	return err;
}
/*
 * ext4_split_extent() splits an extent and marks the extent which is covered
 * by @map as split_flags indicates
 *
 * It may result in splitting the extent into multiple extents (up to three)
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits in two extents: Split is happening at either end of the extent
 *   c> Splits in three extents: Someone is splitting in middle of the extent
 *
 */
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path **ppath,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int unwritten;
	int split_flag1, flags1;
	int allocated = map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	unwritten = ext4_ext_is_unwritten(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (unwritten)
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
				       EXT4_EXT_MARK_UNWRIT2;
		if (split_flag & EXT4_EXT_DATA_VALID2)
			split_flag1 |= EXT4_EXT_DATA_VALID1;
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	} else {
		allocated = ee_len - (map->m_lblk - ee_block);
	}
	/*
	 * Update path is required because previous ext4_split_extent_at() may
	 * result in split of original leaf or extent zeroout.
	 */
	path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (!ex) {
		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
				 (unsigned long) map->m_lblk);
		return -EFSCORRUPTED;
	}
	unwritten = ext4_ext_is_unwritten(ex);
	split_flag1 = 0;

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
		if (unwritten) {
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
						     EXT4_EXT_MARK_UNWRIT2);
		}
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : allocated;
}
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an unwritten extent. It may result in splitting the unwritten
 * extent into multiple extents (up to three - one initialized and two
 * unwritten).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is unwritten.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path **ppath,
					   int flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_sb_info *sbi;
	struct ext4_extent_header *eh;
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex;
	struct ext4_extent *ex, *abut_ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int ee_len, depth, map_len = map->m_len;
	int allocated = 0, max_zeroout = 0;
	int err = 0;
	int split_flag = 0;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map_len);

	sbi = EXT4_SB(inode->i_sb);
	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
		>> inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map_len)
		eof_block = map->m_lblk + map_len;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	zero_ex.ee_len = 0;

	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);

	/* Pre-conditions */
	BUG_ON(!ext4_ext_is_unwritten(ex));
	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));

	/*
	 * Attempt to transfer newly initialized blocks from the currently
	 * unwritten extent to its neighbor. This is much cheaper
	 * than an insertion followed by a merge as those involve costly
	 * memmove() calls. Transferring to the left is the common case in
	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
	 * followed by append writes.
	 *
	 * Limitations of the current logic:
	 *  - L1: we do not deal with writes covering the whole extent.
	 *    This would require removing the extent if the transfer
	 *    is possible.
	 *  - L2: we only attempt to merge with an extent stored in the
	 *    same extent tree node.
	 */
	if ((map->m_lblk == ee_block) &&
		/* See if we can merge left */
		(map_len < ee_len) &&		/*L1*/
		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
		ext4_lblk_t prev_lblk;
		ext4_fsblk_t prev_pblk, ee_pblk;
		unsigned int prev_len;

		abut_ex = ex - 1;
		prev_lblk = le32_to_cpu(abut_ex->ee_block);
		prev_len = ext4_ext_get_actual_len(abut_ex);
		prev_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of ex by 'map_len' blocks */
			ex->ee_block = cpu_to_le32(ee_block + map_len);
			ext4_ext_store_pblock(ex, ee_pblk + map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_unwritten(ex); /* Restore the flag */

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
		   (map_len < ee_len) &&	/*L1*/
		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
		/* See if we can merge right */
		ext4_lblk_t next_lblk;
		ext4_fsblk_t next_pblk, ee_pblk;
		unsigned int next_len;

		abut_ex = ex + 1;
		next_lblk = le32_to_cpu(abut_ex->ee_block);
		next_len = ext4_ext_get_actual_len(abut_ex);
		next_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of abut_ex by 'map_len' blocks */
			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_unwritten(ex); /* Restore the flag */

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(next_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	}
	if (allocated) {
		/* Mark the block containing both extents as dirty */
		ext4_ext_dirty(handle, inode, path + depth);

		/* Update path to point to the right extent */
		path[depth].p_ext = abut_ex;
		goto out;
	} else
		allocated = ee_len - (map->m_lblk - ee_block);

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
		max_zeroout = sbi->s_extent_max_zeroout_kb >>
			(inode->i_sb->s_blocksize_bits - 10);

	if (ext4_encrypted_inode(inode))
		max_zeroout = 0;

	/* If extent is less than s_max_zeroout_kb, zeroout directly */
	if (max_zeroout && (ee_len <= max_zeroout)) {
		err = ext4_ext_zeroout(inode, ex);
		if (err)
			goto out;
		zero_ex.ee_block = ex->ee_block;
		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;
		ext4_ext_mark_initialized(ex);
		ext4_ext_try_to_merge(handle, inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		goto out;
	}

	/*
	 * four cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the first half.
	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (max_zeroout && (allocated > map->m_len)) {
		if (allocated <= max_zeroout) {
			/* case 3 */
			zero_ex.ee_block =
					 cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
			/* case 2 */
			if (map->m_lblk != ee_block) {
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
							ee_block);
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
				if (err)
					goto out;
			}

			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}

	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
				flags);
	if (err > 0)
		err = 0;
out:
	/* If we have gotten a failure, don't zero out status tree */
	if (!err)
		err = ext4_zeroout_es(inode, &zero_ex);
	return err ? err : allocated;
}
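/*
 * Editor's note on the zeroout threshold above (numbers illustrative):
 * with s_extent_max_zeroout_kb = 32 and 4KiB blocks, max_zeroout is
 * 32 >> (12 - 10) = 8 blocks.  A 6-block unwritten extent written in the
 * middle is therefore zeroed and marked initialized in place, while a
 * 100-block extent goes through the three-way split path instead.
 */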
/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO to write
 * to an unwritten extent.
 *
 * Writing to an unwritten extent may result in splitting the unwritten
 * extent into multiple initialized/unwritten extents (up to three)
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be unwritten
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 *
 * This works the same way in the case of initialized -> unwritten conversion.
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the unwritten extent is split. To prevent ENOSPC from occurring at IO
 * completion, we split the unwritten extent before submitting the IO.
 * The unwritten extent will be split into (at most) three unwritten
 * extents. After IO completes, the part being filled will be converted to
 * initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of unwritten extent to be written on success.
 */
static int ext4_split_convert_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path **ppath,
					int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
		  __func__, inode->i_ino,
		  (unsigned long long)map->m_lblk, map->m_len);

	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
		>> inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	/* Convert to unwritten */
	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
		split_flag |= EXT4_EXT_DATA_VALID1;
	/* Convert to initialized */
	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
		split_flag |= ee_block + ee_len <= eof_block ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
	}
	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
						struct inode *inode,
						struct ext4_map_blocks *map,
						struct ext4_ext_path **ppath)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)ee_block, ee_len);

	/* If extent is larger than requested it is a clear sign that we still
	 * have some extent state machine issues left. So extent_split is still
	 * required.
	 * TODO: Once all related issues will be fixed this situation should be
	 * removed.
	 */
	if (ee_block != map->m_lblk || ee_len > map->m_len) {
#ifdef EXT4_DEBUG
		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
			     " len %u; IO logical block %llu, len %u\n",
			     inode->i_ino, (unsigned long long)ee_block, ee_len,
			     (unsigned long long)map->m_lblk, map->m_len);
#endif
		err = ext4_split_convert_extents(handle, inode, map, ppath,
						 EXT4_GET_BLOCKS_CONVERT);
		if (err < 0)
			return err;
		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = ext_depth(inode);
		ex = path[depth].p_ext;
	}

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(handle, inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;

	for (i = 0; i < count; i++)
		unmap_underlying_metadata(bdev, block + i);
}
/*
 * Handle EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
			      ext4_lblk_t lblk,
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
	struct ext4_extent *last_ex;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
		return 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	/*
	 * We're going to remove EOFBLOCKS_FL entirely in future so we
	 * do not care for this case anymore. Simply remove the flag
	 * if there are no extents.
	 */
	if (unlikely(!eh->eh_entries))
		goto out;
	last_ex = EXT_LAST_EXTENT(eh);
	/*
	 * We should clear the EOFBLOCKS_FL flag if we are writing the
	 * last block in the last extent in the file.  We test this by
	 * first checking to see if the caller to
	 * ext4_ext_get_blocks() was interested in the last block (or
	 * a block beyond the last block) in the current extent.  If
	 * this turns out to be false, we can bail out from this
	 * function immediately.
	 */
	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
	    ext4_ext_get_actual_len(last_ex))
		return 0;
	/*
	 * If the caller does appear to be planning to write at or
	 * beyond the end of the current extent, we then test to see
	 * if the current extent is the last extent in the file, by
	 * checking to make sure it was reached via the rightmost node
	 * at each level of the tree.
	 */
	for (i = depth-1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
out:
	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	return ext4_mark_inode_dirty(handle, inode);
}
/**
 * ext4_find_delalloc_range: find delayed allocated block in the given range.
 *
 * Return 1 if there is a delalloc block in the range, otherwise 0.
 */
int ext4_find_delalloc_range(struct inode *inode,
			     ext4_lblk_t lblk_start,
			     ext4_lblk_t lblk_end)
{
	struct extent_status es;

	ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
	if (es.es_len == 0)
		return 0; /* there is no delay extent in this tree */
	else if (es.es_lblk <= lblk_start &&
		 lblk_start < es.es_lblk + es.es_len)
		return 1;
	else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
		return 1;
	else
		return 0;
}
int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
}
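/*
 * Example (editor's sketch): with a cluster ratio of 4, a query for
 * lblk 10 probes the whole containing cluster, blocks 8 through 11
 * (lblk_start = 10 & ~3 = 8, lblk_end = 8 + 4 - 1 = 11).
 */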
/*
 * Determines how many complete clusters (out of those specified by the 'map')
 * are under delalloc and were reserved quota for.
 * This function is called when we are writing out the blocks that were
 * originally written with their allocation delayed, but then the space was
 * allocated using fallocate() before the delayed allocation could be resolved.
 * The cases to look for are:
 * ('=' indicates delayed allocated blocks
 *  '-' indicates non-delayed allocated blocks)
 * (a) partial clusters towards beginning and/or end outside of allocated range
 *     are not delalloc'ed.
 *	Ex:
 *	|----c---=|====c====|====c====|===-c----|
 *	         |++++++ allocated ++++++|
 *	==> 4 complete clusters in above example
 *
 * (b) partial cluster (outside of allocated range) towards either end is
 *     marked for delayed allocation. In this case, we will exclude that
 *     cluster.
 *	Ex:
 *	|----====c========|========c========|
 *	     |++++++ allocated ++++++|
 *	==> 1 complete cluster in above example
 *
 *	Ex:
 *	|================c================|
 *	            |++++++ allocated ++++++|
 *	==> 0 complete clusters in above example
 *
 * The ext4_da_update_reserve_space will be called only if we
 * determine here that there were some "entire" clusters that span
 * this 'allocated' range.
 * In the non-bigalloc case, this function will just end up returning num_blks
 * without ever calling ext4_find_delalloc_range.
 */
static unsigned int
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
			   unsigned int num_blks)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
	ext4_lblk_t lblk_from, lblk_to, c_offset;
	unsigned int allocated_clusters = 0;

	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);

	/* max possible clusters for this allocation */
	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;

	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);

	/* Check towards left side */
	c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
	if (c_offset) {
		lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
		lblk_to = lblk_from + c_offset - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
			allocated_clusters--;
	}

	/* Now check towards right. */
	c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
	if (allocated_clusters && c_offset) {
		lblk_from = lblk_start + num_blks;
		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
			allocated_clusters--;
	}

	return allocated_clusters;
}
static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
			   struct ext4_map_blocks *map,
			   struct ext4_ext_path **ppath, int flags,
			   unsigned int allocated, ext4_fsblk_t newblock)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	/*
	 * Make sure that the extent is no bigger than we support with
	 * unwritten extent
	 */
	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug("%s: inode %lu, logical"
		"block %llu, max_blocks %u\n", __func__, inode->i_ino,
		  (unsigned long long)ee_block, ee_len);

	if (ee_block != map->m_lblk || ee_len > map->m_len) {
		err = ext4_split_convert_extents(handle, inode, map, ppath,
				EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
		if (err < 0)
			return err;
		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = ext_depth(inode);
		ex = path[depth].p_ext;
		if (!ex) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) map->m_lblk);
			return -EFSCORRUPTED;
		}
	}

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		return err;
	/* first mark the extent as unwritten */
	ext4_ext_mark_unwritten(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(handle, inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
	if (err)
		return err;
	ext4_ext_show_leaf(inode, path);

	ext4_update_inode_fsync_trans(handle, inode, 1);
	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
	if (err)
		return err;
	map->m_flags |= EXT4_MAP_UNWRITTEN;
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_len = allocated;
	return allocated;
}
static int
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path **ppath, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	struct ext4_ext_path *path = *ppath;
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = ext4_inode_aio(inode);

	ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	/*
	 * When writing into unwritten space, we should not fail to
	 * allocate metadata blocks for the new extent block if needed.
	 */
	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;

	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
						allocated, newblock);

	/* get_block() before submit the IO, split the extent */
	if (flags & EXT4_GET_BLOCKS_PRE_IO) {
		ret = ext4_split_convert_extents(handle, inode, map, ppath,
						 flags | EXT4_GET_BLOCKS_CONVERT);
		if (ret <= 0)
			goto out;
		/*
		 * Flag the inode(non aio case) or end_io struct (aio case)
		 * that this IO needs conversion to written when IO is
		 * completed
		 */
		if (io)
			ext4_set_io_unwritten_flag(inode, io);
		else
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out;
	}
	/* IO end_io complete, convert the filled extent to written */
	if (flags & EXT4_GET_BLOCKS_CONVERT) {
		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
							   ppath);
		if (ret >= 0) {
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map->m_lblk,
						 path, map->m_len);
		} else
			err = ret;
		map->m_flags |= EXT4_MAP_MAPPED;
		map->m_pblk = newblock;
		if (allocated > map->m_len)
			allocated = map->m_len;
		map->m_len = allocated;
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto map_out;
	}

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out1;
	}

	/* buffered write, writepage time, convert*/
	ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
	map->m_flags |= EXT4_MAP_NEW;
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > map->m_len) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
	}
	map->m_len = allocated;

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservations.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;

		reserved_clusters = get_reserved_cluster_alloc(inode,
				map->m_lblk, map->m_len);
		if (reserved_clusters)
			ext4_da_update_reserve_space(inode,
						     reserved_clusters,
						     0);
	}

map_out:
	map->m_flags |= EXT4_MAP_MAPPED;
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
					 map->m_len);
		if (err < 0)
			goto out2;
	}
out1:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	return err ? err : allocated;
}
/*
 * get_implied_cluster_alloc - check to see if the requested
 * allocation (in the map structure) overlaps with a cluster already
 * allocated in an extent.
 *	@sb	The filesystem superblock structure
 *	@map	The requested lblk->pblk mapping
 *	@ex	The extent structure which might contain an implied
 *		cluster allocation
 *
 * This function is called by ext4_ext_map_blocks() after we failed to
 * find blocks that were already in the inode's extent tree.  Hence,
 * we know that the beginning of the requested region cannot overlap
 * the extent from the inode's extent tree.  There are three cases we
 * want to catch.  The first is this case:
 *
 *		 |--- cluster # N--|
 *    |--- extent ---|	|---- requested region ---|
 *			|==========|
 *
 * The second case that we need to test for is this one:
 *
 *   |--------- cluster # N ----------------|
 *	   |--- requested region --|   |------- extent ----|
 *	   |=======================|
 *
 * The third case is when the requested region lies between two extents
 * within the same cluster:
 *          |------------- cluster # N-------------|
 * |----- ex -----|                  |---- ex_right ----|
 *                  |------ requested region ------|
 *                  |================|
 *
 * In each of the above cases, we need to set map->m_pblk and
 * map->m_len so they correspond to the extent labelled as
 * "|====|" from cluster #N, since it is already in use for data in
 * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
 * as a new "allocated" block region.  Otherwise, we will return 0 and
 * ext4_ext_map_blocks() will then allocate one or more new clusters
 * by calling ext4_mb_new_blocks().
 */
static int get_implied_cluster_alloc(struct super_block *sb,
				     struct ext4_map_blocks *map,
				     struct ext4_extent *ex,
				     struct ext4_ext_path *path)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
	ext4_lblk_t ex_cluster_start, ex_cluster_end;
	ext4_lblk_t rr_cluster_start;
	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);

	/* The extent passed in that we are trying to match */
	ex_cluster_start = EXT4_B2C(sbi, ee_block);
	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);

	/* The requested region passed into ext4_map_blocks() */
	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);

	if ((rr_cluster_start == ex_cluster_end) ||
	    (rr_cluster_start == ex_cluster_start)) {
		if (rr_cluster_start == ex_cluster_end)
			ee_start += ee_len - 1;
		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
		map->m_len = min(map->m_len,
				 (unsigned) sbi->s_cluster_ratio - c_offset);
		/*
		 * Check for and handle this case:
		 *
		 *   |--------- cluster # N-------------|
		 *		       |------- extent ----|
		 *	   |--- requested region ---|
		 *	   |===========|
		 */

		if (map->m_lblk < ee_block)
			map->m_len = min(map->m_len, ee_block - map->m_lblk);

		/*
		 * Check for the case where there is already another allocated
		 * block to the right of 'ex' but before the end of the cluster.
		 *
		 *          |------------- cluster # N-------------|
		 * |----- ex -----|                  |---- ex_right ----|
		 *                  |------ requested region ------|
		 *                  |================|
		 */
		if (map->m_lblk > ee_block) {
			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
			map->m_len = min(map->m_len, next - map->m_lblk);
		}

		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
		return 1;
	}

	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
	return 0;
}
/*
 * Block allocation/map/preallocation routine for extents based files
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex, *ex2;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_fsblk_t newblock = 0;
	int free_on_err = 0, err = 0, depth, ret;
	unsigned int allocated = 0, offset = 0;
	unsigned int allocated_clusters = 0;
	struct ext4_allocation_request ar;
	ext4_io_end_t *io = ext4_inode_aio(inode);
	ext4_lblk_t cluster_offset;
	int set_unwritten = 0;
	bool map_from_cluster = false;

	ext_debug("blocks %u/%u requested for inode %lu\n",
		  map->m_lblk, map->m_len, inode->i_ino);
	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);

	/* find extent for this block */
	path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_find_extent()
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out2;
	}

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * unwritten extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);

		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);

		/* if found extent covers block, simply return it */
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			newblock = map->m_lblk - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
				  ee_block, ee_len, newblock);

			/*
			 * If the extent is initialized check whether the
			 * caller wants to convert it to unwritten.
			 */
			if ((!ext4_ext_is_unwritten(ex)) &&
			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
				allocated = convert_initialized_extent(
						handle, inode, map, &path,
						flags, allocated, newblock);
				goto out2;
			} else if (!ext4_ext_is_unwritten(ex))
				goto out;

			ret = ext4_ext_handle_unwritten_extents(
				handle, inode, map, &path, flags,
				allocated, newblock);
			if (ret < 0)
				err = ret;
			else
				allocated = ret;
			goto out2;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we couldn't try to create block if create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
		goto out2;
	}

	/*
	 * Okay, we need to do block allocation.
	 */
	newex.ee_block = cpu_to_le32(map->m_lblk);
	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);

	/*
	 * If we are doing bigalloc, check to see if the extent returned
	 * by ext4_find_extent() implies a cluster we can use.
	 */
	if (cluster_offset && ex &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		map_from_cluster = true;
		goto got_allocated_blocks;
	}

	/* find neighbour allocated blocks */
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = map->m_lblk;
	ex2 = NULL;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
	if (err)
		goto out2;

	/* Check if the extent after searching to the right implies a
	 * cluster we can use. */
	if ((sbi->s_cluster_ratio > 1) && ex2 &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		map_from_cluster = true;
		goto got_allocated_blocks;
	}

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
	 * EXT_UNWRITTEN_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
		map->m_len = EXT_UNWRITTEN_MAX_LEN;
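
	/*
	 * (Illustrative note: with 4 KiB blocks, EXT_INIT_MAX_LEN caps a
	 * single written extent at 32768 blocks, i.e. 128 MiB; an unwritten
	 * extent can hold one block less, since the high bit of ee_len is
	 * reserved as the unwritten marker.)
	 */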
	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_len = cpu_to_le16(map->m_len);
	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	/*
	 * We calculate the offset from the beginning of the cluster
	 * for the logical block number, since when we allocate a
	 * physical cluster, the physical block should start at the
	 * same offset from the beginning of the cluster.  This is
	 * needed so that future calls to get_implied_cluster_alloc()
	 * work correctly.
	 */
	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
	ar.goal -= offset;
	ar.logical -= offset;
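
	/*
	 * (Worked example, illustrative: with s_cluster_ratio = 16 and
	 * map->m_lblk = 36, offset = 36 % 16 = 4, so the goal and logical
	 * start are pulled back by 4 blocks and ar.len is rounded up to
	 * whole clusters by EXT4_NUM_B2C().)
	 */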
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);
	free_on_err = 1;
	allocated_clusters = ar.len;
	ar.len = EXT4_C2B(sbi, ar.len) - offset;
	if (ar.len > allocated)
		ar.len = allocated;
got_allocated_blocks:
	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock + offset);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark unwritten */
	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
		ext4_ext_mark_unwritten(&newex);
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		/*
		 * io_end structure was created for every IO write to an
		 * unwritten extent. To avoid unnecessary conversion,
		 * here we flag the IO that really needs the conversion.
		 * For the non-async direct IO case, flag the inode state
		 * that we need to perform conversion when IO is done.
		 */
		if (flags & EXT4_GET_BLOCKS_PRE_IO)
			set_unwritten = 1;
	}

	err = 0;
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
		err = check_eofblocks_fl(handle, inode, map->m_lblk,
					 path, ar.len);
	if (!err)
		err = ext4_ext_insert_extent(handle, inode, &path,
					     &newex, flags);

	if (!err && set_unwritten) {
		if (io)
			ext4_set_io_unwritten_flag(inode, io);
		else
			ext4_set_inode_state(inode,
					     EXT4_STATE_DIO_UNWRITTEN);
	}

	if (err && free_on_err) {
		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, NULL, newblock,
				 EXT4_C2B(sbi, allocated_clusters), fb_flags);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext4_ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_flags |= EXT4_MAP_NEW;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;
		/*
		 * Check how many clusters we had reserved for this allocated
		 * range.
		 */
		reserved_clusters = get_reserved_cluster_alloc(inode,
						map->m_lblk, allocated);
		if (!map_from_cluster) {
			BUG_ON(allocated_clusters < reserved_clusters);
			if (reserved_clusters < allocated_clusters) {
				struct ext4_inode_info *ei = EXT4_I(inode);
				int reservation = allocated_clusters -
						  reserved_clusters;
				/*
				 * It seems we claimed a few clusters outside
				 * of the range of this allocation. We should
				 * give them back to the reservation pool. This
				 * can happen in the following case:
				 *
				 * * Suppose s_cluster_ratio is 4 (i.e., each
				 *   cluster has 4 blocks). Thus, the clusters
				 *   are [0-3],[4-7],[8-11]...
				 * * First comes delayed allocation write for
				 *   logical blocks 10 & 11. Since there were no
				 *   previous delayed allocated blocks in the
				 *   range [8-11], we would reserve 1 cluster
				 *   for this write.
				 * * Next comes write for logical blocks 3 to 8.
				 *   In this case, we will reserve 2 clusters
				 *   (for [0-3] and [4-7]; and not for [8-11] as
				 *   that range has delayed allocated blocks).
				 *   Thus total reserved clusters now becomes 3.
				 * * Now, during the delayed allocation writeout
				 *   time, we will first write blocks [3-8] and
				 *   allocate 3 clusters for writing these
				 *   blocks. Also, we would claim all these
				 *   three clusters above.
				 * * Now when we come here to writeout the
				 *   blocks [10-11], we would expect to claim
				 *   the reservation of 1 cluster we had made
				 *   (and we would claim it since there are no
				 *   more delayed allocated blocks in the range
				 *   [8-11]). But our reserved cluster count had
				 *   already gone to 0.
				 *
				 *   Thus, at the step 4 above when we determine
				 *   that there are still some unwritten delayed
				 *   allocated blocks outside of our current
				 *   block range, we should increment the
				 *   reserved clusters count so that when the
				 *   remaining blocks finally get written, we
				 *   could claim them.
				 */
				dquot_reserve_block(inode,
						EXT4_C2B(sbi, reservation));
				spin_lock(&ei->i_block_reservation_lock);
				ei->i_reserved_data_blocks += reservation;
				spin_unlock(&ei->i_block_reservation_lock);
			}
			/*
			 * We will claim quota for all newly allocated blocks.
			 * We're updating the reserved space *after* the
			 * correction above so we do not accidentally free
			 * all the metadata reservation because we might
			 * actually need it later on.
			 */
			ext4_da_update_reserve_space(inode, allocated_clusters,
							1);
		}
	}

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an unwritten extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
	else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	ext4_ext_drop_refs(path);
	kfree(path);

	trace_ext4_ext_map_blocks_exit(inode, flags, map,
				       err ? err : allocated);
	return err ? err : allocated;
}
void ext4_ext_truncate(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	int err = 0;

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
retry:
	err = ext4_es_remove_extent(inode, last_block,
				    EXT_MAX_BLOCKS - last_block);
	if (err == -ENOMEM) {
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto retry;
	}
	if (err) {
		ext4_std_error(inode->i_sb, err);
		return;
	}
	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
	ext4_std_error(inode->i_sb, err);
}
static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
				  ext4_lblk_t len, loff_t new_size,
				  int flags, int mode)
{
	struct inode *inode = file_inode(file);
	handle_t *handle;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	int depth = 0;
	struct ext4_map_blocks map;
	unsigned int credits;
	loff_t epos;

	map.m_lblk = offset;
	map.m_len = len;
	/*
	 * Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple
	 * extents.
	 */
	if (len <= EXT_UNWRITTEN_MAX_LEN)
		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;

	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, len);
	/*
	 * We can only call ext_depth() on extent based inodes
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		depth = ext_depth(inode);
	else
		depth = -1;

retry:
	while (ret >= 0 && len) {
		/*
		 * Recalculate credits when extent tree depth changes.
		 */
		if (depth >= 0 && depth != ext_depth(inode)) {
			credits = ext4_chunk_trans_blocks(inode, len);
			depth = ext_depth(inode);
		}

		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map, flags);
		if (ret <= 0) {
			ext4_debug("inode #%lu: block %u: len %u: "
				   "ext4_ext_map_blocks returned %d",
				   inode->i_ino, map.m_lblk,
				   map.m_len, ret);
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		map.m_lblk += ret;
		map.m_len = len = len - ret;
		epos = (loff_t)map.m_lblk << inode->i_blkbits;
		inode->i_ctime = ext4_current_time(inode);
		if (new_size) {
			if (epos > new_size)
				epos = new_size;
			if (ext4_update_inode_size(inode, epos) & 0x1)
				inode->i_mtime = inode->i_ctime;
		} else {
			if (epos > inode->i_size)
				ext4_set_inode_flag(inode,
						    EXT4_INODE_EOFBLOCKS);
		}
		ext4_mark_inode_dirty(handle, inode);
		ext4_update_inode_fsync_trans(handle, inode, 1);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}

	return ret > 0 ? ret2 : ret;
}
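
/*
 * (Note: ext4_alloc_file_blocks() above is the common allocation loop
 * used by ext4_zero_range() and ext4_fallocate() below; it works in
 * credit-sized chunks so that a large preallocation does not pin one
 * oversized journal transaction.)
 */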
static long ext4_zero_range(struct file *file, loff_t offset,
			    loff_t len, int mode)
{
	struct inode *inode = file_inode(file);
	handle_t *handle = NULL;
	unsigned int max_blocks;
	loff_t new_size = 0;
	int ret = 0;
	int flags;
	int credits;
	int partial_begin, partial_end;
	loff_t start, end;
	ext4_lblk_t lblk;
	unsigned int blkbits = inode->i_blkbits;

	trace_ext4_zero_range(inode, offset, len, mode);

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	/* Call ext4_force_commit to flush all data in case of data=journal. */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	/*
	 * Round up offset. This is not fallocate, we need to zero out
	 * blocks, so convert interior block aligned part of the range to
	 * unwritten and possibly manually zero out unaligned parts of the
	 * range.
	 */
	start = round_up(offset, 1 << blkbits);
	end = round_down((offset + len), 1 << blkbits);

	if (start < offset || end > offset + len)
		return -EINVAL;
	partial_begin = offset & ((1 << blkbits) - 1);
	partial_end = (offset + len) & ((1 << blkbits) - 1);
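
	/*
	 * (Worked example, illustrative, for blkbits = 12: offset = 3072
	 * and len = 2048 give start = 4096, end = 4096, partial_begin =
	 * 3072 and partial_end = 1024, so both edges need manual zeroing
	 * and no whole block lies strictly inside the range.)
	 */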
	lblk = start >> blkbits;
	max_blocks = (end >> blkbits);
	if (max_blocks < lblk)
		max_blocks = 0;
	else
		max_blocks -= lblk;

	mutex_lock(&inode->i_mutex);

	/*
	 * Indirect files do not support unwritten extents
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len > i_size_read(inode) ||
	     offset + len > EXT4_I(inode)->i_disksize)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out_mutex;
	}

	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/* Preallocate the range including the unaligned edges */
	if (partial_begin || partial_end) {
		ret = ext4_alloc_file_blocks(file,
				round_down(offset, 1 << blkbits) >> blkbits,
				(round_up((offset + len), 1 << blkbits) -
				 round_down(offset, 1 << blkbits)) >> blkbits,
				new_size, flags, mode);
		if (ret)
			goto out_dio;
	}

	/* Zero range excluding the unaligned edges */
	if (max_blocks > 0) {
		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
			  EXT4_EX_NOCACHE);

		/*
		 * Prevent page faults from reinstantiating pages we have
		 * released from page cache.
		 */
		down_write(&EXT4_I(inode)->i_mmap_sem);
		ret = ext4_update_disksize_before_punch(inode, offset, len);
		if (ret) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			goto out_dio;
		}
		/* Now release the pages and zero block aligned part of pages */
		truncate_pagecache_range(inode, start, end - 1);
		inode->i_mtime = inode->i_ctime = ext4_current_time(inode);

		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
					     flags, mode);
		up_write(&EXT4_I(inode)->i_mmap_sem);
		if (ret)
			goto out_dio;
	}
	if (!partial_begin && !partial_end)
		goto out_dio;

	/*
	 * In worst case we have to writeout two nonadjacent unwritten
	 * blocks and update the inode
	 */
	credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
	if (ext4_should_journal_data(inode))
		credits += 2;
	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(inode->i_sb, ret);
		goto out_dio;
	}

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	if (new_size) {
		ext4_update_inode_size(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if ((offset + len) > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}
	ext4_mark_inode_dirty(handle, inode);

	/* Zero out partial block at the edges of the range */
	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

	if (file->f_flags & O_SYNC)
		ext4_handle_sync(handle);

	ext4_journal_stop(handle);
out_dio:
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/*
 * preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	loff_t new_size = 0;
	unsigned int max_blocks;
	int ret = 0;
	int flags;
	ext4_lblk_t lblk;
	unsigned int blkbits = inode->i_blkbits;

	/*
	 * Encrypted inodes can't handle collapse range or insert
	 * range since we would need to re-encrypt blocks with a
	 * different IV or XTS tweak (which are based on the logical
	 * block number).
	 *
	 * XXX It's not clear why zero range isn't working, but we'll
	 * leave it disabled for encrypted inodes for now.  This is a
	 * bug we should fix....
	 */
	if (ext4_encrypted_inode(inode) &&
	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
		     FALLOC_FL_ZERO_RANGE)))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
		     FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return ext4_punch_hole(inode, offset, len);

	ret = ext4_convert_inline_data(inode);
	if (ret)
		return ret;

	if (mode & FALLOC_FL_COLLAPSE_RANGE)
		return ext4_collapse_range(inode, offset, len);

	if (mode & FALLOC_FL_INSERT_RANGE)
		return ext4_insert_range(inode, offset, len);

	if (mode & FALLOC_FL_ZERO_RANGE)
		return ext4_zero_range(file, offset, len, mode);

	trace_ext4_fallocate_enter(inode, offset, len, mode);
	lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because if, for example,
	 * blocksize = 4096, offset = 3072 and len = 2048, the range still
	 * touches a full block, so round the end of the range up first.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- lblk;

	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;

	mutex_lock(&inode->i_mutex);

	/*
	 * We only support preallocation for extent-based files
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len > i_size_read(inode) ||
	     offset + len > EXT4_I(inode)->i_disksize)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out;
	}

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
				     flags, mode);
	ext4_inode_resume_unlocked_dio(inode);
	if (ret)
		goto out;

	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
		ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
						EXT4_I(inode)->i_sync_tid);
	}
out:
	mutex_unlock(&inode->i_mutex);
	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
	return ret;
}
/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io call back
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
				   loff_t offset, ssize_t len)
{
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because if, for example,
	 * blocksize = 4096, offset = 3072 and len = 2048, the range still
	 * touches a full block, so round the end of the range up first.
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * This is somewhat ugly but the idea is clear: When transaction is
	 * reserved, everything goes into it. Otherwise we rather start several
	 * smaller transactions for conversion of each extent separately.
	 */
	if (handle) {
		handle = ext4_journal_start_reserved(handle,
						     EXT4_HT_EXT_CONVERT);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		credits = 0;
	} else {
		/*
		 * credits to insert 1 extent into extent tree
		 */
		credits = ext4_chunk_trans_blocks(inode, max_blocks);
	}
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		if (credits) {
			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
						    credits);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				break;
			}
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0)
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
		ext4_mark_inode_dirty(handle, inode);
		if (credits)
			ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	if (!credits)
		ret2 = ext4_journal_stop(handle);
	return ret > 0 ? ret2 : ret;
}
/*
 * If newes is not an existing extent (newes->es_pblk equals zero), find
 * the delayed extent at the start of newes, update newes accordingly and
 * return the start of the next delayed extent.
 *
 * If newes is an existing extent (newes->es_pblk is not equal to zero),
 * return the start of the next delayed extent or EXT_MAX_BLOCKS if no
 * delayed extent is found. Leave newes unmodified.
 */
static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes)
{
	struct extent_status es;
	ext4_lblk_t block, next_del;

	if (newes->es_pblk == 0) {
		ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
				newes->es_lblk + newes->es_len - 1, &es);

		/*
		 * No extent in extent-tree contains block @newes->es_pblk,
		 * then the block may stay in 1)a hole or 2)delayed-extent.
		 */
		if (es.es_len == 0)
			/* A hole found. */
			return 0;

		if (es.es_lblk > newes->es_lblk) {
			/* A hole found. */
			newes->es_len = min(es.es_lblk - newes->es_lblk,
					    newes->es_len);
			return 0;
		}

		newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
	}

	block = newes->es_lblk + newes->es_len;
	ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
	if (es.es_len == 0)
		next_del = EXT_MAX_BLOCKS;
	else
		next_del = es.es_lblk;

	return next_del;
}
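
/*
 * (Note: the value returned above feeds the fiemap extent walk, which
 * uses the start of the next delayed extent to decide how far a mapped
 * entry extends before delayed-allocation data begins.)
 */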
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
			     struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = (__u64)iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
						start, len);
		if (has_inline)
			return error;
	}

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		error = ext4_ext_precache(inode);
		if (error)
			return error;
		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
	}

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
					    ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information
		 * and pushing extents back to the user.
		 */
		error = ext4_fill_fiemap_extents(inode, start_blk,
						 len_blks, fieinfo);
	}
	return error;
}
/*
 * Function to access the path buffer for marking it dirty.
 * It also checks if there are sufficient credits left in the journal handle
 * to update the path.
 */
static int
ext4_access_path(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path)
{
	int credits, err;

	if (!ext4_handle_valid(handle))
		return 0;

	/*
	 * Check if need to extend journal credits
	 * 3 for leaf, sb, and inode plus 2 (bmap and group
	 * descriptor) for each block group; assume two block
	 * groups
	 */
	if (handle->h_buffer_credits < 7) {
		credits = ext4_writepage_trans_blocks(inode);
		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		/* EAGAIN is success */
		if (err && err != -EAGAIN)
			return err;
	}

	err = ext4_ext_get_access(handle, inode, path);
	return err;
}
/*
 * ext4_ext_shift_path_extents:
 * Shift the extents of a path structure lying between path[depth].p_ext
 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
 * if it is a right shift or left shift operation.
 */
static int
ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
			    struct inode *inode, handle_t *handle,
			    enum SHIFT_DIRECTION SHIFT)
{
	int depth, err = 0;
	struct ext4_extent *ex_start, *ex_last;
	bool update = 0;
	depth = path->p_depth;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			ex_start = path[depth].p_ext;
			if (!ex_start)
				return -EFSCORRUPTED;

			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);

			err = ext4_access_path(handle, inode, path + depth);
			if (err)
				goto out;

			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
				update = 1;

			while (ex_start <= ex_last) {
				if (SHIFT == SHIFT_LEFT) {
					le32_add_cpu(&ex_start->ee_block,
						-shift);
					/* Try to merge to the left. */
					if ((ex_start >
					    EXT_FIRST_EXTENT(path[depth].p_hdr))
					    &&
					    ext4_ext_try_to_merge_right(inode,
					    path, ex_start - 1))
						ex_last--;
					else
						ex_start++;
				} else {
					le32_add_cpu(&ex_last->ee_block, shift);
					ext4_ext_try_to_merge_right(inode, path,
						ex_last);
					ex_last--;
				}
			}
			err = ext4_ext_dirty(handle, inode, path + depth);
			if (err)
				goto out;

			if (--depth < 0 || !update)
				break;
		}

		/* Update index too */
		err = ext4_access_path(handle, inode, path + depth);
		if (err)
			goto out;

		if (SHIFT == SHIFT_LEFT)
			le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
		else
			le32_add_cpu(&path[depth].p_idx->ei_block, shift);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		/* we are done if current index is not a starting index */
		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
			break;

		depth--;
	}

out:
	return err;
}
/*
 * ext4_ext_shift_extents:
 * All the extents which lie in the range from @start to the last allocated
 * block for the @inode are shifted either towards left or right (depending
 * upon @SHIFT) by @shift blocks.
 * On success, 0 is returned, error otherwise.
 */
static int
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
		       ext4_lblk_t start, ext4_lblk_t shift,
		       enum SHIFT_DIRECTION SHIFT)
{
	struct ext4_ext_path *path;
	int ret = 0, depth;
	struct ext4_extent *extent;
	ext4_lblk_t stop, *iterator, ex_start, ex_end;

	/* Let path point to the last extent */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);

	depth = path->p_depth;
	extent = path[depth].p_ext;
	if (!extent)
		goto out;

	stop = le32_to_cpu(extent->ee_block);

	/*
	 * For left shifts, make sure the hole on the left is big enough to
	 * accommodate the shift. For right shifts, make sure the last extent
	 * won't be shifted beyond EXT_MAX_BLOCKS.
	 */
	if (SHIFT == SHIFT_LEFT) {
		path = ext4_find_extent(inode, start - 1, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (extent) {
			ex_start = le32_to_cpu(extent->ee_block);
			ex_end = le32_to_cpu(extent->ee_block) +
				ext4_ext_get_actual_len(extent);
		} else {
			ex_start = 0;
			ex_end = 0;
		}

		if ((start == ex_start && shift > ex_start) ||
		    (shift > start - ex_end)) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		if (shift > EXT_MAX_BLOCKS -
		    (stop + ext4_ext_get_actual_len(extent))) {
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * In case of left shift, iterator points to start and it is increased
	 * till we reach stop. In case of right shift, iterator points to stop
	 * and it is decreased till we reach start.
	 */
	if (SHIFT == SHIFT_LEFT)
		iterator = &start;
	else
		iterator = &stop;

	/*
	 * It's safe to start updating extents. Start and stop are unsigned, so
	 * in case of right shift, if an extent with block 0 is reached, the
	 * iterator becomes NULL to indicate the end of the loop.
	 */
	while (iterator && start <= stop) {
		path = ext4_find_extent(inode, *iterator, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (!extent) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) *iterator);
			return -EFSCORRUPTED;
		}
		if (SHIFT == SHIFT_LEFT && *iterator >
		    le32_to_cpu(extent->ee_block)) {
			/* Hole, move to the next extent */
			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
				path[depth].p_ext++;
			} else {
				*iterator = ext4_ext_next_allocated_block(path);
				continue;
			}
		}

		if (SHIFT == SHIFT_LEFT) {
			extent = EXT_LAST_EXTENT(path[depth].p_hdr);
			*iterator = le32_to_cpu(extent->ee_block) +
					ext4_ext_get_actual_len(extent);
		} else {
			extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
			if (le32_to_cpu(extent->ee_block) > 0)
				*iterator = le32_to_cpu(extent->ee_block) - 1;
			else
				/* Beginning is reached, end of the loop */
				iterator = NULL;
			/* Update path extent in case we need to stop */
			while (le32_to_cpu(extent->ee_block) < start)
				extent++;
			path[depth].p_ext = extent;
		}
		ret = ext4_ext_shift_path_extents(path, shift, inode,
				handle, SHIFT);
		if (ret)
			break;
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}
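
/*
 * (Illustrative example: collapsing blocks [2..3] of a file ends up
 * calling ext4_ext_shift_extents() with start = 4, shift = 2 and
 * SHIFT_LEFT, so an extent that began at logical block 4 is renumbered
 * to begin at block 2.)
 */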
/*
 * ext4_collapse_range:
 * This implements the fallocate's collapse range functionality for ext4.
 * Returns 0 on success, non-zero on error.
 */
int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t punch_start, punch_stop;
	handle_t *handle;
	unsigned int credits;
	loff_t new_size, ioffset;
	int ret;

	/*
	 * We need to test this early because xfstests assumes that a
	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support collapse range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Collapse range works only on fs block size aligned offsets. */
	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
		return -EINVAL;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	trace_ext4_collapse_range(inode, offset, len);

	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* Call ext4_force_commit to flush all data in case of data=journal. */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	mutex_lock(&inode->i_mutex);
	/*
	 * There is no need to overlap collapse range with EOF, in which case
	 * it is effectively a truncate operation
	 */
	if (offset + len >= i_size_read(inode)) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);
	/*
	 * Need to round down offset to be aligned with page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/*
	 * Write tail of the last page before removed range since it will get
	 * removed from the page cache below.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
	if (ret)
		goto out_mmap;
	/*
	 * Write data that will be shifted to preserve them when discarding
	 * page cache below. We are also protected from pages becoming dirty
	 * by i_mmap_sem.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
					   LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	ret = ext4_es_remove_extent(inode, punch_start,
				    EXT_MAX_BLOCKS - punch_start);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}
	ext4_discard_preallocations(inode);

	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
				     punch_stop - punch_start, SHIFT_LEFT);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	new_size = i_size_read(inode) - len;
	i_size_write(inode, new_size);
	EXT4_I(inode)->i_disksize = new_size;

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
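
/*
 * (Usage note, illustrative: this path is reached from userspace via
 * fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len) with offset and
 * len both multiples of the cluster size; e.g. collapsing the first
 * 4 KiB block of a 4 KiB-block file shrinks i_size by 4096 bytes.)
 */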
/*
 * ext4_insert_range:
 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
 * The data blocks starting from @offset to the EOF are shifted by @len
 * towards right to create a hole in the @inode. Inode size is increased
 * by @len bytes.
 * Returns 0 on success, error otherwise.
 */
int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	handle_t *handle;
	struct ext4_ext_path *path;
	struct ext4_extent *extent;
	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
	unsigned int credits, ee_len;
	int ret = 0, depth, split_flag = 0;
	loff_t ioffset;

	/*
	 * We need to test this early because xfstests assumes that an
	 * insert range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support insert range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Insert range works only on fs block size aligned offsets. */
	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
		return -EINVAL;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_insert_range(inode, offset, len);

	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);

	/* Call ext4_force_commit to flush all data in case of data=journal */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	mutex_lock(&inode->i_mutex);
	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Check for wrap through zero */
	if (inode->i_size + len > inode->i_sb->s_maxbytes) {
		ret = -EFBIG;
		goto out_mutex;
	}

	/* Offset should be less than i_size */
	if (offset >= i_size_read(inode)) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);
	/*
	 * Need to round down to align start offset to page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/* Write out all dirty pages */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
			LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}

	/* Expand file to avoid data loss if there is error while shifting */
	inode->i_size += len;
	EXT4_I(inode)->i_disksize += len;
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	if (ret)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
	if (IS_ERR(path)) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	depth = ext_depth(inode);
	extent = path[depth].p_ext;
	if (extent) {
		ee_start_lblk = le32_to_cpu(extent->ee_block);
		ee_len = ext4_ext_get_actual_len(extent);

		/*
		 * If offset_lblk is not the starting block of extent, split
		 * the extent @offset_lblk
		 */
		if ((offset_lblk > ee_start_lblk) &&
		    (offset_lblk < (ee_start_lblk + ee_len))) {
			if (ext4_ext_is_unwritten(extent))
				split_flag = EXT4_EXT_MARK_UNWRIT1 |
					EXT4_EXT_MARK_UNWRIT2;
			ret = ext4_split_extent_at(handle, inode, &path,
					offset_lblk, split_flag,
					EXT4_EX_NOCACHE |
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);
		}

		ext4_ext_drop_refs(path);
		kfree(path);
		if (ret < 0) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}
	} else {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	ret = ext4_es_remove_extent(inode, offset_lblk,
			EXT_MAX_BLOCKS - offset_lblk);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	/*
	 * if offset_lblk lies in a hole which is at start of file, use
	 * ee_start_lblk to shift extents
	 */
	ret = ext4_ext_shift_extents(inode, handle,
		ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
		len_lblk, SHIFT_RIGHT);

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/**
 * ext4_swap_extents - Swap extents between two inodes
 *
 * @inode1:	First inode
 * @inode2:	Second inode
 * @lblk1:	Start block for first inode
 * @lblk2:	Start block for second inode
 * @count:	Number of blocks to swap
 * @unwritten:	Mark second inode's extents as unwritten after swap
 * @erp:	Pointer to save error value
 *
 * This helper routine does exactly what it promises: "swap extents". All
 * other stuff such as page-cache locking consistency, bh mapping consistency
 * or extent's data copying must be performed by caller.
 * Locking:
 * 		i_mutex is held for both inodes
 * 		i_data_sem is locked for write for both inodes
 * Assumptions:
 *		All pages from requested range are locked for both inodes
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!mutex_is_locked(&inode1->i_mutex));
	BUG_ON(!mutex_is_locked(&inode2->i_mutex));

	*erp = ext4_es_remove_extent(inode1, lblk1, count);
	if (unlikely(*erp))
		return 0;
	*erp = ext4_es_remove_extent(inode2, lblk2, count);
	if (unlikely(*erp))
		return 0;

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap ? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap it now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);
		/*
		 * Looks scary, but it isn't: the second inode already points
		 * to the new blocks, and it was successfully dirtied. An error
		 * can happen only due to a journal error, in which case the
		 * full transaction will be aborted anyway.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_ext_drop_refs(path1);
		kfree(path1);
		ext4_ext_drop_refs(path2);
		kfree(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}
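
/*
 * (Note: on failure ext4_swap_extents() still returns the number of
 * blocks swapped so far via replaced_count; callers must also check
 * *erp to distinguish complete success from an aborted swap.)
 */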