// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "compression.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * when auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* inode objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for extents newer
	 * than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
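/*
 * Defrag records are kept in an rbtree keyed by the (root, ino) pair:
 * compare by root objectid first, then by inode number. Insertion and
 * lookup below both go through this comparator.
 */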
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set IN_DEFRAG flag and evict the inode from memory,
		 * and then re-read this inode, this new inode doesn't have
		 * IN_DEFRAG flag. In that case we may find the existing
		 * defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}
/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need merge
	 * them together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
/*
 * pick the defragable inode that we want, if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}
#define BTRFS_DEFRAG_BATCH	1024
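/*
 * Each pass over an inode defragments at most BTRFS_DEFRAG_BATCH pages
 * before the record is requeued, so one very fragmented inode cannot
 * starve the other inodes waiting in the defrag tree.
 */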
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	key.objectid = defrag->ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* do a chunk of defrag */
		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
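/*
 * btrfs_copy_from_user() returns the number of bytes copied; a return of
 * 0 tells the caller to fault the page in and retry, falling back to one
 * page per iteration.
 */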
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here. There should be no need to mark the pages
		 * accessed as prepare_pages should have marked them accessed
		 * in prepare_pages via find_or_create_page()
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start,
				      search_len, 0);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW,
				     NULL, cached_state, GFP_NOFS);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
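/*
 * btrfs_find_new_delalloc_bytes() walks the extent maps in the range
 * [start, start + len) and tags every hole with EXTENT_DELALLOC_NEW, so
 * the delalloc accounting can tell freshly allocated ranges apart from
 * overwrites of existing extents.
 */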
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);
	unsigned int extra_bits = 0;

	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
		if (start_pos >= isize &&
		    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
			/*
			 * There can't be any extents following eof in this case
			 * so just set the delalloc new bit for the range
			 * directly.
			 */
			extra_bits |= EXTENT_DELALLOC_NEW;
		} else {
			err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
							    start_pos,
							    num_bytes, cached);
			if (err)
				return err;
		}
	}

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached, 0);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
							     + diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
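/*
 * Note that btrfs_drop_extent_cache() preallocates two split maps per
 * iteration: dropping a range that lies strictly inside an existing map
 * leaves one surviving piece in front of the range and one behind it,
 * and allocating up front avoids allocating under the tree write lock.
 */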
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(BTRFS_I(inode));
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are move off from our leaf to its immediate left or
		 * right neighbor leafs, we end up with a correct and adjusted
		 * path->slots[0] for our insertion (if replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(fs_info, leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &extent_item_size,
				       extent_item_size,
				       sizeof(struct btrfs_item) +
				       extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, last_end) : end;
	return ret;
}
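/*
 * On return *drop_end is the end of the last extent that was actually
 * dropped, clamped to 'end'; hole punching uses it to learn how far the
 * tree was cleaned before the loop stopped.
 */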
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
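/*
 * extent_mergeable() returns 1 only for a plain BTRFS_FILE_EXTENT_REG item
 * that is uncompressed, unencrypted and backed by the same disk bytenr at
 * a contiguous offset; on success *start/*end describe the neighbour's
 * range.
 */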
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
					   0, root->root_key.objectid,
					   ino, orig_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}
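/*
 * Reading the page is only required when the write does not start at a
 * page aligned offset or when the caller forces it; -EAGAIN means the
 * page was truncated or migrated away from the mapping while unlocked
 * and must be looked up again by prepare_pages().
 */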
/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}
/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if need.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need re-prepare the pages
 * the other < 0 number - Something wrong happens
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = start_pos
		+ round_up(pos + write_bytes - start_pos,
			   fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				 cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					     last_pos, cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(&inode->vfs_inode,
						   ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		clear_extent_bit(&inode->io_tree, start_pos, last_pos,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				 0, 0, cached_state);
		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}
static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	ret = btrfs_start_write_no_snapshotting(root);
	if (!ret)
		return -ENOSPC;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;

	while (1) {
		lock_extent(&inode->io_tree, lockstart, lockend);
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent(&inode->io_tree, lockstart, lockend);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
		btrfs_end_write_no_snapshotting(root);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}

	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}
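/*
 * check_can_nocow() returns > 0 when the write can go nocow, in which case
 * *write_bytes may have been trimmed to the preallocated extent and the
 * no-snapshotting lock is left held (the caller drops it with
 * btrfs_end_write_no_snapshotting()); 0 means COW is required; < 0 is an
 * error.
 */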
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;

	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
			PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_SIZE - 1);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
						PAGE_SIZE);
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		sector_offset = pos & (fs_info->sectorsize - 1);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
						  write_bytes);
		if (ret < 0) {
			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
						      BTRFS_INODE_PREALLOC)) &&
			    check_can_nocow(BTRFS_I(inode), pos,
					    &write_bytes) > 0) {
				/*
				 * For nodata cow case, no need to reserve
				 * data space.
				 */
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_SIZE);
				reserve_bytes = round_up(write_bytes +
							 sector_offset,
							 fs_info->sectorsize);
			} else {
				break;
			}
		}

		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
						data_reserved, pos,
						write_bytes);
			else
				btrfs_end_write_no_snapshotting(root);
			break;
		}

		release_bytes = reserve_bytes;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes, true);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
				goto again;
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes, true);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors <<
						fs_info->sb->s_blocksize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(inode,
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
						pos, copied, &cached_state);
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state);
		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
					       true);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_end_write_no_snapshotting(root);

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos,
					       fs_info->sectorsize);
			lockend = round_up(pos + copied,
					   fs_info->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
			btrfs_btree_balance_dirty(fs_info);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_end_write_no_snapshotting(root);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(inode, data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	return num_written ? num_written : ret;
}
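/*
 * Each iteration of the loop above follows the same shape: reserve data
 * and metadata space, prepare and lock the pages, copy from the iov_iter,
 * trim the reservation down to the sectors actually dirtied, mark the
 * range delalloc and finally release the pages, balancing dirty pages as
 * we go.
 */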
static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, from);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos += written;
	written_buffered = __btrfs_buffered_write(file, from, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return written ? written : err;
}
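/*
 * When the direct write cannot make further progress (for example because
 * the range would need COW), the remainder is written through the page
 * cache; flushing and waiting on that range, then invalidating it, keeps
 * a following direct read coherent with what was just written.
 */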
static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec64_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start_pos;
	u64 end_pos;
	ssize_t num_written = 0;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
	ssize_t err;
	loff_t pos;
	size_t count = iov_iter_count(from);
	loff_t oldsize;
	int clean_page = 0;

	if (!(iocb->ki_flags & IOCB_DIRECT) &&
	    (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		inode_unlock(inode);
		return err;
	}

	pos = iocb->ki_pos;
	if (iocb->ki_flags & IOCB_NOWAIT) {
		/*
		 * We will allocate space in case nodatacow is not set,
		 * so bail
		 */
		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
					       BTRFS_INODE_PREALLOC)) ||
		    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
			inode_unlock(inode);
			return -EAGAIN;
		}
	}

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err) {
		inode_unlock(inode);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		inode_unlock(inode);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there.  We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		end_pos = round_up(pos + count,
				   fs_info->sectorsize);
		err = btrfs_cont_expand(inode, oldsize, end_pos);
		if (err) {
			inode_unlock(inode);
			goto out;
		}
		if (start_pos > round_up(oldsize, fs_info->sectorsize))
			clean_page = 1;
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = __btrfs_direct_write(iocb, from);
	} else {
		num_written = __btrfs_buffered_write(file, from, pos);
		if (num_written > 0)
			iocb->ki_pos = pos + num_written;
		if (clean_page)
			pagecache_isize_extended(inode, oldsize,
						 i_size_read(inode));
	}

	inode_unlock(inode);

	/*
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	spin_unlock(&BTRFS_I(inode)->lock);
	if (num_written > 0)
		num_written = generic_write_sync(iocb, num_written);

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private && private->filldir_buf)
		kfree(private->filldir_buf);
	kfree(private);
	filp->private_data = NULL;

	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}
static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = btrfs_fdatawrite_range(inode, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	blk_finish_plug(&plug);

	return ret;
}
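/*
 * Bumping sync_writers lets the bio submission path know an fsync is in
 * flight, so checksumming is done synchronously instead of being deferred
 * to worker threads, reducing fsync latency.
 */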
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates are
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;
	bool full_sync = false;
	u64 len;

	/*
	 * The range length can be represented by u64, we have to do the typecasts
	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
	 */
	len = (u64)end - (u64)start + 1;
	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * out of the ->i_mutex. If so, we can flush the dirty pages by
	 * multi-task, and make the performance up.  See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret)
		goto out;

	inode_lock(inode);
	atomic_inc(&root->log_batch);
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);
	/*
	 * We might have had more pages made dirty after calling
	 * start_ordered_ops and before acquiring the inode's i_mutex.
	 */
	if (full_sync) {
		/*
		 * For a full sync, we need to make sure any ordered operations
		 * start and finish before we start logging the inode, so that
		 * all extents are persisted and the respective file extent
		 * items are in the fs/subvol btree.
		 */
		ret = btrfs_wait_ordered_range(inode, start, len);
	} else {
		/*
		 * Start any new ordered operations before starting to log the
		 * inode. We will wait for them to finish in btrfs_sync_log().
		 *
		 * Right before acquiring the inode's mutex, we might have new
		 * writes dirtying pages, which won't immediately start the
		 * respective ordered operations - that is done through the
		 * fill_delalloc callbacks invoked from the writepage and
		 * writepages address space operations. So make sure we start
		 * all ordered operations before starting to log our inode. Not
		 * doing this means that while logging the inode, writeback
		 * could start and invoke writepage/writepages, which would call
		 * the fill_delalloc callbacks (cow_file_range,
		 * submit_compressed_extents). These callbacks add first an
		 * extent map to the modified list of extents and then create
		 * the respective ordered operation, which means in
		 * tree-log.c:btrfs_log_inode() we might capture all existing
		 * ordered operations (with btrfs_get_logged_extents()) before
		 * the fill_delalloc callback adds its ordered operation, and by
		 * the time we visit the modified list of extent maps (with
		 * btrfs_log_changed_extents()), we see and process the extent
		 * map they created. We then use the extent map to construct a
		 * file extent item for logging without waiting for the
		 * respective ordered operation to finish - this file extent
		 * item points to a disk location that might not have yet been
		 * written to, containing random data - so after a crash a log
		 * replay will make our inode have file extent items that point
		 * to disk locations containing invalid data, as we returned
		 * success to userspace without waiting for the respective
		 * ordered operation to finish, because it wasn't captured by
		 * btrfs_get_logged_extents().
		 */
		ret = start_ordered_ops(inode, start, end);
	}
	if (ret) {
		inode_unlock(inode);
		goto out;
	}
	atomic_inc(&root->log_batch);

	/*
	 * If the last transaction that changed this file was before the current
	 * transaction and we have the full sync flag set in our inode, we can
	 * bail out now without any syncing.
	 *
	 * Note that we can't bail out if the full sync flag isn't set. This is
	 * because when the full sync flag is set we start all ordered extents
	 * and wait for them to fully complete - when they complete they update
	 * the inode's last_trans field through:
	 *
	 *     btrfs_finish_ordered_io() ->
	 *         btrfs_update_inode_fallback() ->
	 *             btrfs_update_inode() ->
	 *                 btrfs_set_inode_last_trans()
	 *
	 * So we are sure that last_trans is up to date and can do this check to
	 * bail out safely. For the fast path, when the full sync flag is not
	 * set in our inode, we can not do it because we start only our ordered
	 * extents and don't wait for them to complete (that is when
	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
	 * value might be less than or equals to fs_info->last_trans_committed,
	 * and setting a speculative last_trans for an inode when a buffered
	 * write is made (such as fs_info->generation + 1 for example) would not
	 * be reliable since after setting the value and before fsync is called
	 * any number of transactions can start and commit (transaction kthread
	 * commits the current transaction periodically), and a transaction
	 * commit does not start nor waits for ordered extents to complete.
	 */
	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
	    (full_sync && BTRFS_I(inode)->last_trans <=
	     fs_info->last_trans_committed) ||
	    (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
	     BTRFS_I(inode)->last_trans
	     <= fs_info->last_trans_committed)) {
		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		/*
		 * An ordered extent might have started before and completed
		 * already with io errors, in which case the inode was not
		 * updated and we end up here. So check the inode's mapping
		 * for any errors that might have happened since we last
		 * checked called fsync.
		 */
		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
		inode_unlock(inode);
		goto out;
	}

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path).  If we use join
	 * here we could get into a situation where we're waiting on IO to
	 * happen that is blocked on a transaction trying to commit.  With start
	 * we inc the extwriter counter, so we wait for all extwriters to exit
	 * before we start blocking join'ers.  This comment is to keep somebody
	 * from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		inode_unlock(inode);
		goto out;
	}
	trans->sync = true;

	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = 1;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	inode_unlock(inode);

	/*
	 * If any of the ordered extents had an error, just return it to user
	 * space, so that the application knows some writes didn't succeed and
	 * can take proper action (retry for e.g.). Blindly committing the
	 * transaction in this case, would fool userspace that everything was
	 * successful. And we also want to make sure our log doesn't contain
	 * file extent items pointing to extents that weren't fully written to -
	 * just like in the non fast fsync path, where we check for the ordered
	 * operation's error flag before writing to the log tree and return -EIO
	 * if any of them had this flag set (btrfs_wait_ordered_range) -
	 * therefore we need to check for errors in the ordered operations,
	 * which are indicated by ctx.io_err.
	 */
	if (ctx.io_err) {
		btrfs_end_transaction(trans);
		ret = ctx.io_err;
		goto out;
	}

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root, &ctx);
			if (!ret) {
				ret = btrfs_end_transaction(trans);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start, len);
			if (ret) {
				btrfs_end_transaction(trans);
				goto out;
			}
		}
		ret = btrfs_commit_transaction(trans);
	} else {
		ret = btrfs_end_transaction(trans);
	}
out:
	ASSERT(list_empty(&ctx.list));
	err = file_check_and_advance_wb_err(file);
	if (!ret)
		ret = err;
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}
static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}
static int fill_holes(struct btrfs_trans_handle *trans,
		struct btrfs_inode *inode,
		struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret <= 0) {
		/*
		 * We should have dropped this offset, so if we find it then
		 * something has gone horribly wrong.
		 */
		if (ret == 0)
			ret = -EINVAL;
		return ret;
	}

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
		u64 num_bytes;

		key.offset = offset;
		btrfs_set_item_key_safe(fs_info, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->bdev = fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&inode->runtime_flags);
	}

	return 0;
}
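/*
 * Example (illustrative): punching out [4096, 16384) of a fully written file
 * on a filesystem without the NO_HOLES feature leaves behind a file extent
 * item with disk_bytenr == 0 covering those 12288 bytes. With NO_HOLES the
 * gap between neighbouring keys is the only on-disk record of the hole,
 * which is why fill_holes() skips the btree manipulation in that case and
 * only refreshes the in-memory extent map.
 */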
/*
 * Find a hole or "vacuum" extent on the given inode and change start/len to
 * the end of that extent (a hole/vacuum extent is one whose
 * em->start <= start && em->start + em->len > start).
 * When a hole extent is found, return 1 and modify start/len.
 */
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	int ret = 0;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
			      round_down(*start, fs_info->sectorsize),
			      round_up(*len, fs_info->sectorsize), 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/* Hole or vacuum extent(only exists in no-hole mode) */
	if (em->block_start == EXTENT_MAP_HOLE) {
		ret = 1;
		*len = em->start + em->len > *start + *len ?
		       0 : *start + *len - em->start - em->len;
		*start = em->start + em->len;
	}
	free_extent_map(em);
	return ret;
}
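/*
 * Worked example (illustrative): with *start == 10000, *len == 20000 and a
 * hole extent map covering [8192, 16384), the function returns 1 and adjusts
 * the range to *start = 16384, *len = 30000 - 16384 = 13616, i.e. the part
 * of the original range that lies beyond the hole.
 */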
static int btrfs_punch_hole_lock_range(struct inode *inode,
				       const u64 lockstart,
				       const u64 lockend,
				       struct extent_state **cached_state)
{
	while (1) {
		struct btrfs_ordered_extent *ordered;
		int ret;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this range
		 * and nobody raced in and read a page in this range, if we did
		 * we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len <= lockstart ||
		     ordered->file_offset > lockend)) &&
		     !filemap_range_has_page(inode->i_mapping,
					     lockstart, lockend)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, cached_state);
		ret = btrfs_wait_ordered_range(inode, lockstart,
					       lockend - lockstart + 1);
		if (ret)
			return ret;
	}
	return 0;
}
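/*
 * Note on the loop above: between truncate_pagecache_range() and
 * lock_extent_bits() a concurrent reader can bring a page back into the
 * range, and an ordered extent may still overlap [lockstart, lockend]. In
 * either case the extent lock is dropped, the ordered I/O is waited out and
 * the whole truncate-and-lock sequence is retried from scratch.
 */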
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 lockstart;
	u64 lockend;
	u64 tail_start;
	u64 tail_len;
	u64 orig_start = offset;
	u64 cur_offset;
	u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	unsigned int rsv_count;
	bool same_block;
	bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
	u64 ino_size;
	bool truncated_block = false;
	bool updated_inode = false;

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	inode_lock(inode);
	ino_size = round_up(inode->i_size, fs_info->sectorsize);
	ret = find_first_non_hole(inode, &offset, &len);
	if (ret < 0)
		goto out_only_mutex;
	if (ret && !len) {
		/* Already in a large hole */
		ret = 0;
		goto out_only_mutex;
	}

	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
	lockend = round_down(offset + len,
			     btrfs_inode_sectorsize(inode)) - 1;
	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
	/*
	 * We needn't truncate any block which is beyond the end of the file
	 * because we are sure there is no data there.
	 */
	/*
	 * Only do this if we are in the same block and we aren't doing the
	 * entire block.
	 */
	if (same_block && len < fs_info->sectorsize) {
		if (offset < ino_size) {
			truncated_block = true;
			ret = btrfs_truncate_block(inode, offset, len, 0);
		} else {
			ret = 0;
		}
		goto out_only_mutex;
	}

	/* zero back part of the first block */
	if (offset < ino_size) {
		truncated_block = true;
		ret = btrfs_truncate_block(inode, offset, 0, 0);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	/* Check the aligned pages after the first unaligned page,
	 * if offset != orig_start, which means the first unaligned page
	 * including several following pages are already in holes,
	 * the extra check can be skipped */
	if (offset == orig_start) {
		/* after truncate page, check hole again */
		len = offset + len - lockstart;
		offset = lockstart;
		ret = find_first_non_hole(inode, &offset, &len);
		if (ret < 0)
			goto out_only_mutex;
		if (ret && !len) {
			ret = 0;
			goto out_only_mutex;
		}
		lockstart = offset;
	}

	/* Check the tail unaligned part is in a hole */
	tail_start = lockend + 1;
	tail_len = offset + len - tail_start;
	if (tail_len > 0) {
		ret = find_first_non_hole(inode, &tail_start, &tail_len);
		if (unlikely(ret < 0))
			goto out_only_mutex;
		if (!ret) {
			/* zero the front end of the last page */
			if (tail_start + tail_len < ino_size) {
				truncated_block = true;
				ret = btrfs_truncate_block(inode,
							tail_start + tail_len,
							0, 1);
				if (ret)
					goto out_only_mutex;
			}
		}
	}

	if (lockend < lockstart) {
		ret = 0;
		goto out_only_mutex;
	}

	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
					  &cached_state);
	if (ret)
		goto out_only_mutex;	/* inode is unlocked at the label */

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent if no_holes isn't set
	 */
	rsv_count = no_holes ? 2 : 3;
	trans = btrfs_start_transaction(root, rsv_count);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, 0);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	cur_offset = lockstart;
	len = lockend - cur_offset;
	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1, 0, 0, NULL);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &fs_info->trans_block_rsv;

		if (cur_offset < drop_end && cur_offset < ino_size) {
			ret = fill_holes(trans, BTRFS_I(inode), path,
					cur_offset, drop_end);
			if (ret) {
				/*
				 * If we failed then we didn't insert our hole
				 * entries for the area we dropped, so now the
				 * fs is corrupted, so we must abort the
				 * transaction.
				 */
				btrfs_abort_transaction(trans, ret);
				err = ret;
				break;
			}
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, rsv_count);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, 0);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;

		ret = find_first_non_hole(inode, &cur_offset, &len);
		if (unlikely(ret < 0))
			break;
		if (ret && !len) {
			ret = 0;
			break;
		}
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &fs_info->trans_block_rsv;
	/*
	 * If we are using the NO_HOLES feature we might have had already an
	 * hole that overlaps a part of the region [lockstart, lockend] and
	 * ends at (or beyond) lockend. Since we have no file extent items to
	 * represent holes, drop_end can be less than lockend and so we must
	 * make sure we have an extent map representing the existing hole (the
	 * call to __btrfs_drop_extents() might have dropped the existing extent
	 * map representing the existing hole), otherwise the fast fsync path
	 * will not record the existence of the hole region
	 * [existing_hole_start, lockend].
	 */
	if (drop_end <= lockend)
		drop_end = lockend + 1;
	/*
	 * Don't insert file hole extent item if it's for a range beyond eof
	 * (because it's useless) or if it represents a 0 bytes range (when
	 * cur_offset == drop_end).
	 */
	if (cur_offset < ino_size && cur_offset < drop_end) {
		ret = fill_holes(trans, BTRFS_I(inode), path,
				cur_offset, drop_end);
		if (ret) {
			/* Same comment as above. */
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_trans;
		}
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);

	trans->block_rsv = &fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	updated_inode = true;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(fs_info, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
out_only_mutex:
	if (!updated_inode && truncated_block && !ret && !err) {
		/*
		 * If we only end up zeroing part of a page, we still need to
		 * update the inode item, so that all the time fields are
		 * updated as well as the necessary btrfs inode in memory fields
		 * for detecting, at fsync time, if the inode isn't yet in the
		 * log tree or it's there but not up to date.
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
		} else {
			err = btrfs_update_inode(trans, root, inode);
			ret = btrfs_end_transaction(trans);
		}
	}
	inode_unlock(inode);
	if (ret && !err)
		err = ret;
	return err;
}
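/*
 * Illustrative user-space sketch (standard fallocate(2), not part of this
 * file): punching a hole in the middle of a file ends up in
 * btrfs_punch_hole() above. FALLOC_FL_PUNCH_HOLE must be combined with
 * FALLOC_FL_KEEP_SIZE.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 */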
/* Helper structure to record which range is already reserved */
struct falloc_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Helper function to add falloc range
 *
 * Caller should have locked the larger range of extent containing
 * [start, len)
 */
static int add_falloc_range(struct list_head *head, u64 start, u64 len)
{
	struct falloc_range *prev = NULL;
	struct falloc_range *range = NULL;

	if (list_empty(head))
		goto insert;

	/*
	 * As fallocate iterates by bytenr order, we only need to check
	 * the last range.
	 */
	prev = list_entry(head->prev, struct falloc_range, list);
	if (prev->start + prev->len == start) {
		prev->len += len;
		return 0;
	}
insert:
	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;
	range->start = start;
	range->len = len;
	list_add_tail(&range->list, head);
	return 0;
}
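/*
 * Example (illustrative): btrfs_fallocate() walks the target range in byte
 * order, so two adjacent holes [0, 4096) and [4096, 8192) end up recorded as
 * a single falloc_range entry {start = 0, len = 8192} rather than two list
 * entries.
 */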
static int btrfs_fallocate_update_isize(struct inode *inode,
					const u64 end,
					const int mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	int ret2;

	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
		return 0;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	inode->i_ctime = current_time(inode);
	i_size_write(inode, end);
	btrfs_ordered_update_i_size(inode, end, NULL);
	ret = btrfs_update_inode(trans, root, inode);
	ret2 = btrfs_end_transaction(trans);

	return ret ? ret : ret2;
}
enum {
	RANGE_BOUNDARY_WRITTEN_EXTENT = 0,
	RANGE_BOUNDARY_PREALLOC_EXTENT = 1,
	RANGE_BOUNDARY_HOLE = 2,
};

static int btrfs_zero_range_check_range_boundary(struct inode *inode,
						 u64 offset)
{
	const u64 sectorsize = btrfs_inode_sectorsize(inode);
	struct extent_map *em;
	int ret;

	offset = round_down(offset, sectorsize);
	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	if (em->block_start == EXTENT_MAP_HOLE)
		ret = RANGE_BOUNDARY_HOLE;
	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
	else
		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;

	free_extent_map(em);
	return ret;
}
static int btrfs_zero_range(struct inode *inode,
			    loff_t offset,
			    loff_t len,
			    const int mode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct extent_map *em;
	struct extent_changeset *data_reserved = NULL;
	int ret;
	u64 alloc_hint = 0;
	const u64 sectorsize = btrfs_inode_sectorsize(inode);
	u64 alloc_start = round_down(offset, sectorsize);
	u64 alloc_end = round_up(offset + len, sectorsize);
	u64 bytes_to_reserve = 0;
	bool space_reserved = false;

	inode_dio_wait(inode);

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
			      alloc_start, alloc_end - alloc_start, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	/*
	 * Avoid hole punching and extent allocation for some cases. More cases
	 * could be considered, but these are unlikely common and we keep things
	 * as simple as possible for now. Also, intentionally, if the target
	 * range contains one or more prealloc extents together with regular
	 * extents and holes, we drop all the existing extents and allocate a
	 * new prealloc extent, so that we get a larger contiguous disk extent.
	 */
	if (em->start <= alloc_start &&
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		const u64 em_end = em->start + em->len;

		if (em_end >= offset + len) {
			/*
			 * The whole range is already a prealloc extent,
			 * do nothing except updating the inode's i_size if
			 * needed.
			 */
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		/*
		 * Part of the range is already a prealloc extent, so operate
		 * only on the remaining part of the range.
		 */
		alloc_start = em_end;
		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
		len = offset + len - alloc_start;
		offset = alloc_start;
		alloc_hint = em->block_start + em->len;
	}
	free_extent_map(em);

	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
				      alloc_start, sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
			free_extent_map(em);
			ret = btrfs_truncate_block(inode, offset, len, 0);
			if (!ret)
				ret = btrfs_fallocate_update_isize(inode,
								   offset + len,
								   mode);
			return ret;
		}
		free_extent_map(em);
		alloc_start = round_down(offset, sectorsize);
		alloc_end = alloc_start + sectorsize;
		goto reserve_space;
	}

	alloc_start = round_up(offset, sectorsize);
	alloc_end = round_down(offset + len, sectorsize);

	/*
	 * For unaligned ranges, check the pages at the boundaries, they might
	 * map to an extent, in which case we need to partially zero them, or
	 * they might map to a hole, in which case we need our allocation range
	 * to cover them.
	 */
	if (!IS_ALIGNED(offset, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(inode, offset);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_start = round_down(offset, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset, 0, 0);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

	if (!IS_ALIGNED(offset + len, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(inode,
							    offset + len);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_end = round_up(offset + len, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

reserve_space:
	if (alloc_start < alloc_end) {
		struct extent_state *cached_state = NULL;
		const u64 lockstart = alloc_start;
		const u64 lockend = alloc_end - 1;

		bytes_to_reserve = alloc_end - alloc_start;
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      bytes_to_reserve);
		if (ret < 0)
			goto out;
		space_reserved = true;
		ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
						alloc_start, bytes_to_reserve);
		if (ret)
			goto out;
		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
						  &cached_state);
		if (ret)
			goto out;
		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
						alloc_end - alloc_start,
						i_blocksize(inode),
						offset + len, &alloc_hint);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state);
		/* btrfs_prealloc_file_range releases reserved space on error */
		if (ret) {
			space_reserved = false;
			goto out;
		}
	}
	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
out:
	if (ret && space_reserved)
		btrfs_free_reserved_data_space(inode, data_reserved,
					       alloc_start, bytes_to_reserve);
	extent_changeset_free(data_reserved);

	return ret;
}
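/*
 * Illustrative user-space sketch (standard fallocate(2), not part of this
 * file): zeroing an unaligned range ends up in btrfs_zero_range() above.
 * With a 4096 byte sectorsize, the unaligned head and tail are zeroed in
 * place (or covered by widening the allocation when they sit in holes) and
 * the aligned middle becomes a prealloc extent.
 *
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 100, 10000);
 */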
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct falloc_range *range;
	struct falloc_range *tmp;
	struct list_head reserve_list;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 actual_end = 0;
	struct extent_map *em;
	int blocksize = btrfs_inode_sectorsize(inode);
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);
	cur_offset = alloc_start;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Only trigger disk allocation, don't trigger qgroup reserve
	 *
	 * For qgroup space, it will be checked later.
	 */
	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      alloc_end - alloc_start);
		if (ret < 0)
			return ret;
	}

	inode_lock(inode);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out;
	}

	/*
	 * TODO: Move these two operations after we have checked
	 * accurate reserved space, or fallocate can still fail but
	 * with page truncated or size expanded.
	 *
	 * But that's a minor problem and won't do much harm BTW.
	 */
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else if (offset + len > inode->i_size) {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the block if i_size lands in the
		 * middle of a block.
		 */
		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = btrfs_zero_range(inode, offset, len, mode);
		inode_unlock(inode);
		return ret;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);

		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						       alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	/* First, check if we exceed the qgroup limit */
	INIT_LIST_HEAD(&reserve_list);
	while (cur_offset < alloc_end) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = add_falloc_range(&reserve_list, cur_offset,
					       last_byte - cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
					cur_offset, last_byte - cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else {
			/*
			 * Do not need to reserve unwritten extent for this
			 * range, free reserved data space first, otherwise
			 * it'll result in false ENOSPC error.
			 */
			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, last_byte - cur_offset);
		}
		free_extent_map(em);
		cur_offset = last_byte;
	}

	/*
	 * If ret is still 0, means we're OK to fallocate.
	 * Or just cleanup the list and exit.
	 */
	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
		if (!ret)
			ret = btrfs_prealloc_file_range(inode, mode,
					range->start,
					range->len, i_blocksize(inode),
					offset + len, &alloc_hint);
		else
			btrfs_free_reserved_data_space(inode,
					data_reserved, range->start,
					range->len);
		list_del(&range->list);
		kfree(range);
	}
	if (ret < 0)
		goto out_unlock;

	/*
	 * We didn't need to allocate any more space, but we still extended the
	 * size of the file so we need to update i_size and the inode item.
	 */
	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state);
out:
	inode_unlock(inode);
	/* Let go of our reservation. */
	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
		btrfs_free_reserved_data_space(inode, data_reserved,
				alloc_start, alloc_end - cur_offset);
	extent_changeset_free(data_reserved);
	return ret;
}
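/*
 * Illustrative user-space sketch (standard fallocate(2), not part of this
 * file): preallocating 1MiB without changing the file size, served by
 * btrfs_fallocate() above.
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 */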
static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 lockstart;
	u64 lockend;
	u64 start;
	u64 len;
	int ret = 0;

	if (inode->i_size == 0)
		return -ENXIO;

	/*
	 * *offset can be negative, in this case we start finding DATA/HOLE from
	 * the very start of the file.
	 */
	start = max_t(loff_t, 0, *offset);

	lockstart = round_down(start, fs_info->sectorsize);
	lockend = round_up(i_size_read(inode),
			   fs_info->sectorsize);
	if (lockend <= lockstart)
		lockend = lockstart + fs_info->sectorsize;
	lockend--;
	len = lockend - lockstart + 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 &cached_state);

	while (start < inode->i_size) {
		em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
					     start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			   (em->block_start != EXTENT_MAP_HOLE &&
			    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	if (!ret) {
		if (whence == SEEK_DATA && start >= inode->i_size)
			ret = -ENXIO;
		else
			*offset = min_t(loff_t, start, inode->i_size);
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
	return ret;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	inode_lock(inode);
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, whence);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			inode_unlock(inode);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, whence);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
	inode_unlock(inode);
	return offset;
}
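/*
 * Illustrative user-space sketch (standard lseek(2), not part of this file):
 * locating the first hole and the first data byte of a file, both served by
 * find_desired_extent() above.
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 */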
static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	filp->f_mode |= FMODE_NOWAIT;
	return generic_file_open(inode, filp);
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter      = generic_file_read_iter,
	.splice_read	= generic_file_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.clone_file_range = btrfs_clone_file_range,
	.dedupe_file_range = btrfs_dedupe_file_range,
};

void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}

int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

	return ret;
}